repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
ChristosChristofidis/h2o-3
|
h2o-py/tests/testdir_misc/pyunit_frame_as_list.py
|
1
|
1083
|
import sys
sys.path.insert(1, "../../")
import h2o
def frame_as_list(ip,port):
# Connect to h2o
h2o.init(ip,port)
iris = h2o.import_frame(path=h2o.locate("smalldata/iris/iris_wheader.csv"))
prostate = h2o.import_frame(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
airlines = h2o.import_frame(path=h2o.locate("smalldata/airlines/allyears2k.zip"))
res1 = h2o.as_list(iris, use_pandas=False)
assert abs(float(res1[9][0]) - 4.4) < 1e-10 and abs(float(res1[9][1]) - 2.9) < 1e-10 and \
abs(float(res1[9][2]) - 1.4) < 1e-10, "incorrect values"
res2 = h2o.as_list(prostate, use_pandas=False)
assert abs(float(res2[7][0]) - 7) < 1e-10 and abs(float(res2[7][1]) - 0) < 1e-10 and \
abs(float(res2[7][2]) - 68) < 1e-10, "incorrect values"
res3 = h2o.as_list(airlines, use_pandas=False)
assert abs(float(res3[4][0]) - 1987) < 1e-10 and abs(float(res3[4][1]) - 10) < 1e-10 and \
abs(float(res3[4][2]) - 18) < 1e-10, "incorrect values"
if __name__ == "__main__":
h2o.run_test(sys.argv, frame_as_list)
|
apache-2.0
|
theboocock/plinkseq_utilities
|
pseq_util/old_scripts/merge_unique.py
|
4
|
2499
|
import argparse
import pandas
import sys
phenotypes = {}
def merge_phenotypes(sample_name, pheno_information,gout_header, diabetes_header):
"""
Merges using the diabetes data, e.g. l_split[1:], which is the diabetes dataset we are using.
"""
try:
temp_row = phenotypes[sample_name]
except KeyError:
phenotypes[sample_name] = [' ' for i in range(len(gout_header))]
for i, new in enumerate(diabetes_header):
try:
index = gout_header.index(new)
#print(i)
#print(pheno_information)
#print(new)
#print(index)
#print(len(pheno_information))
#print(phenotypes[sample_name])
#print(len(diabetes_header))
#print(diabetes_header)
phenotypes[sample_name][index] = pheno_information[i]
except ValueError:
continue
#sys.stderr.write(new + ' does not exist\n')
def update_diabetes_info(diabetes, gout_header):
"""
Update diabetes information
"""
with open(diabetes) as f:
d_header = []
for i, line in enumerate(f):
line = line.replace('\n','')
l_split = line.split('\t')
if ( i == 0 ):
d_header = l_split[1:]
else:
samp_name = l_split[0]
merge_phenotypes(samp_name, l_split[1:], gout_header, d_header)
return(d_header)
def phenotype_information(gout_input):
"""
add_phenotype_information for new samples
"""
with open(gout_input) as f:
phenotype_header = []
for i, line in enumerate(f):
line = line.replace('\n','')
l_split = line.split('\t')
if ( i == 0 ):
phenotype_header = l_split[1:]
else:
samp_name = l_split[0]
phenotypes[samp_name] = l_split[1:]
return(phenotype_header)
def print_phenotypes(header):
print('PATIENT'+'\t'+'\t'.join(header))
for key, value in phenotypes.items():
print(key +'\t' +'\t'.join(value))
def main():
parser = argparse.ArgumentParser(description="Get Unique columns")
parser.add_argument('-g','--gout',dest='gout')
parser.add_argument('-d','--diabetes',dest='diabetes')
args = parser.parse_args()
header = phenotype_information(args.gout)
update_diabetes_info(args.diabetes, header)
print_phenotypes(header)
if __name__=="__main__":
main()
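# Illustrative sketch only, not called above: the same merge expressed with
# pandas (imported but unused in this script). It assumes, as the code above
# does, tab-separated files whose first column is the sample name; for columns
# shared between the two headers, the diabetes values overwrite the gout ones.
def merge_with_pandas(gout_path, diabetes_path):
    gout = pandas.read_csv(gout_path, sep='\t', index_col=0)
    diabetes = pandas.read_csv(diabetes_path, sep='\t', index_col=0)
    # columns the two files share, mirroring the lookup in merge_phenotypes()
    shared = [col for col in diabetes.columns if col in gout.columns]
    merged = gout.copy()
    # overwrite shared columns for samples present in both files
    merged.update(diabetes[shared])
    # samples only in the diabetes file get blank values elsewhere
    new_samples = diabetes.index.difference(gout.index)
    merged = pandas.concat([merged, diabetes.loc[new_samples, shared]])
    return merged.fillna(' ')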
|
mit
|
nkeim/runtrackpy
|
runtrackpy/track.py
|
1
|
16828
|
"""Utilities to drive the Caswell Python tracking code (as modified by NCK).
Functions of note:
identify_frame() previews feature identification.
track2disk() implements a complete tracking workflow.
The 'params' dictionaries required below have the following options:
For identification:
'identmod': Name of module in which to find identification function.
Default: This one.
'identfunc': Name of identification function.
Default: basic bandpass-subpixel algorithm.
'maxgray': Maximum grayscale value of images (default 0 -> best guess)
'bright': 0 -> dark particles on light background (default); 1 -> inverse
[Depending on 'identfunc', the following parameters may be different.]
'featsize': Expected particle feature radius
'bphigh': Scale, in pixels, for smoothing images and making them more lumpy
'maxrg': Cutoff for particle radius of gyration --- how extended the particle is
'threshold': Ignore pixels smaller than this value
'merge_cutoff': Merge features that are too close to each other.
For tracking:
'maxdisp': Radius of region in which to look for a particle in the next frame.
Set too high, and the algorithm will be overwhelmed with possible matches.
'memory': How many frames a particle can skip, and still be identified if it has
not moved past 'maxdisp'.
The 'window' dictionaries limit where and when to look for particles.
Items 'xmin', 'xmax', 'ymin', and 'ymax' set the spatial limits. 'firstframe'
and 'lastframe' set the range of frames to track, inclusive; the first frame
is numbered 1. All values are optional.
If you are using the runtrackpy.run module, these can be stored in "trackpy.ini"
and "window.ini" in each movie directory.
Simple example of a do-it-yourself tracking pipeline:
params = dict(featsize=3, bphigh=0.7, maxrg=100, maxdisp=3)
mytracks = list(link_dataframes(feature_iter(enumerate(allfiles), params), params))
# 'mytracks' can then be combined into a single DataFrame with the append() method.
See track2disk() for something much more user-friendly.
"""
# Copyright 2013 Nathan C. Keim
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License, or (at
#your option) any later version.
#
#This program is distributed in the hope that it will be useful, but
#WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, see <http://www.gnu.org/licenses>.
import os, sys, itertools, importlib
import numpy as np
import scipy.misc
from scipy.spatial import cKDTree
import pandas, tables
import trackpy.feature, trackpy.linking, trackpy.predict
from . import identification
from .util import readSingleCfg
from .statusboard import StatusFile, Stopwatch, format_td
# I've coded this with 32-bit floats to save disk space and bandwidth.
# The native data type of pandas is a 64-bit float.
# If you have more than ~10^7 particles and/or frames, you want 64 bits.
# Just remove all the casts to 'float32' and 'float64', and redefine the pytables
# columns as Float64Col, and you should be all set.
# Note that there will be pandas trouble if the pytables definition mixes
# 32- and 64-bit fields.
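# A minimal sketch of the 64-bit variant described above (hypothetical,
# illustration only; the rest of this module keeps the 32-bit TrackPoint
# class defined at the bottom of the file).
class TrackPoint64(tables.IsDescription):
    """64-bit pytables row format for tracks data (illustration only)."""
    frame = tables.Float64Col(pos=1)
    particle = tables.Float64Col(pos=2)
    x = tables.Float64Col(pos=3)
    y = tables.Float64Col(pos=4)
    intensity = tables.Float64Col(pos=5)
    rg2 = tables.Float64Col(pos=6)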
# Nuts and bolts of individual tracking operations
def identify_frame(im, params, window=None):
"""Identify features in image array 'im' and return a DataFrame
Uses the function determined and loaded by get_identify_function().
Loading that function can be expensive, so it's better to get the function
returned by get_identify_function() and use that yourself if many
frames are to be processed.
"""
if float(params.get('bright', 0)):
im = 1 - im
return get_identify_function(params)(im, params, window=window)
def get_identify_function(params):
"""Based on the 'identfunc' and 'identmod' elements in 'params', decide which
function to use, then pass 'params' to that function. That function
interprets 'params', processes 'im', and returns a DataFrame with at least
columns 'x' and 'y', and hopefully 'intensity' and 'rg2'.
If 'identmod' or 'identfunc' is not specified, the defaults are "."
and "identify_frame_basic", respectively, where "." refers to this module.
Because 'identmod' is imported after this module is loaded, it can import
this module and use functions defined in it, particularly postprocess_features().
"""
identfunc_name = params.get('identfunc', 'identify_frame_basic')
identmod_name = params.get('identmod', '.')
if identmod_name == '.':
return globals()[identfunc_name]
else:
module = importlib.import_module(identmod_name)
module = reload(module)
return getattr(module, identfunc_name)
def identify_frame_basic(im, params, window=None):
"""Basic bandpass-subpixel feature identification procedure.
For cropping the features, 'window' can be a dict of xmin, xmax, ymin, ymax,
or "file", which uses the "window.ini" file in the current directory.
See module docs for 'params'
"""
# Parameters
featsize = int(params.get('featsize', 3))
bphigh = float(params.get('bphigh', 0.7))
bplow = int(params.get('bplow', featsize))
threshold = float(params.get('threshold', 1e-15))
# Feature identification
imbp = identification.band_pass(im, bplow, bphigh)
lm = identification.find_local_max(imbp, featsize, threshold=threshold)
lmcrop = identification.local_max_crop(imbp, lm, featsize)
pos, m, r2 = identification.subpixel_centroid(imbp, lmcrop, featsize, struct_shape='circle')
# Munging
df = pandas.DataFrame({'x': pos[0,:], 'y': pos[1,:], 'intensity': m, 'rg2': r2})
return postprocess_features(df, params, window=window)
def postprocess_features(df, params, window=None):
"""Apply standard cuts, cropping, merging to a features DataFrame."""
# This could be used by custom feature identification functions defined in
# other files.
maxrg = float(params.get('maxrg', np.inf))
merge_cutoff = float(params.get('merge_cutoff', -1))
# Radius of gyration cut
feats = df[df.rg2 <= maxrg]
# Apply crop window
if window is not None:
if window == 'file':
window = get_window()
feats = feats[(feats.x > window['xmin']) & (feats.x < window['xmax']) & \
(feats.y > window['ymin']) & (feats.y < window['ymax'])]
# Merge nearby particles
if merge_cutoff <= 0:
return feats
else:
return merge_groups(feats, merge_cutoff)
def feature_iter(filename_pairs, params, window=None):
"""Convert a sequence of (frame number, filename) into a sequence of features data.
Note that this uses track.imread(), not the imread from e.g. pylab."""
for fnum, filename in filename_pairs:
# NOTE that this imread is not like the matplotlib version, which is
# already normalized.
# We use this version because importing matplotlib is very expensive.
ftr = identify_frame(imread(filename, params),
params, window=window)
yield fnum, ftr
def imread(filename, params=None):
"""Load a single image, normalized to the range (0, 1).
Attempts to replicate matplotlib.imread() without matplotlib.
Uses "maxgray" in 'params', if available.
"""
if params is None: params = {}
imraw = scipy.misc.imread(filename)
mg = float(params.get('maxgray', 0))
if not mg: # Guess
if imraw.dtype.name == 'uint8':
mg = 2**8 - 1
elif imraw.dtype.name == 'uint16':
mg = 2**16 - 1
elif imraw.dtype.name.startswith('float'):
mg = 1.0
else:
raise ValueError("Can't guess max gray value of image. Use parameter 'maxgray'.")
return imraw / float(mg)
def merge_groups(feats, merge_cutoff):
"""Post-process a DataFrame to merge features within 'merge_cutoff' of each other.
Sums the values in the 'intensity' column, and sets 'rg2' to NaN.
Uses a crude algorithm that merges any close neighbors it encounters. This means
that extended clusters of multiple features may not be completely merged, if a
feature at the edge of the cluster is examined first.
"""
xy = feats[['x', 'y']].values
masses = feats[['intensity']].values
rg2 = feats[['rg2']].values
ckdtree = cKDTree(xy, 5)
for i in range(len(xy)):
dists, nns = ckdtree.query(xy[i], k=6, distance_upper_bound=merge_cutoff) # Groups of up to 6 features
if dists[1] < 1e23:
nnids_all = nns.compress(~np.isinf(dists))
# Exclude already-processed neighbors, whose rg2's have been nulled.
nnids = [nn for nn in nnids_all if not np.isnan(rg2[nn])]
if len(nnids) > 1:
xy[i,:] = np.mean(xy[nnids], axis=0)
masses[i] = np.sum(masses[nnids])
# It's not clear how to merge rg2. So we use it
# to mark features as merged, and allow cutting of those rows
rg2[nnids[1:]] = np.nan
feats_merged = feats.copy()
feats_merged['x'] = xy[:,0]
feats_merged['y'] = xy[:,1]
feats_merged['intensity'] = masses
feats_merged['rg2'] = rg2
return feats_merged.dropna()
def get_window(winfilename='window.ini'):
"""Returns the contents of 'window.ini' as a dict of integers.
Indices in window.ini are interpreted ImageJ-style (1-based).
Caller must be prepared to interpret "-1" as "end"."""
return interpret_window(readSingleCfg(winfilename))
def interpret_window(windowdict):
"""Fill in missing or special values in a window specification.
This processes the content of "window.ini"."""
win = dict(xmin=float(windowdict.get('xmin', 1)) - 1,
xmax=float(windowdict.get('xmax', -1)) - 1,
ymin=float(windowdict.get('ymin', 1)) - 1,
ymax=float(windowdict.get('ymax', -1)) - 1,
firstframe=int(windowdict.get('firstframe', 1)),
lastframe=int(windowdict.get('lastframe', -1)))
if win['xmax'] < 0:
win['xmax'] = np.inf
if win['ymax'] < 0:
win['ymax'] = np.inf
return win
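# Example (illustration only): interpret_window({'xmin': '10', 'lastframe': '20'})
# gives xmin=9.0 (converted to 0-based), xmax=inf, ymin=0.0, ymax=inf,
# firstframe=1, lastframe=20.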
def link_dataframes(points, params):
"""Takes an iterator of (framenumber, DataFrame) tuples.
Requires columns 'x', 'y'.
Returns an iterable of DataFrames, now with 'particle' and 'frame' columns.
See module docs for 'params'
"""
search_range = float(params.get('maxdisp', None))
memory = int(params.get('memory', 0))
predict = params.get('predict')
if not predict:
predictor = None
elif predict == 'nearest':
predictor = trackpy.predict.NearestVelocityPredict()
elif predict == 'channel':
predictor = trackpy.predict.ChannelPredict(int(params.get('predict_channel_bin_size', 50)))
else:
raise ValueError('predict parameter must be "nearest", "channel", or nothing.')
if 'predictor' in params:
predictor = params['predictor']
if predictor is not None:
linker = predictor.link_df_iter
else:
linker = trackpy.linking.link_df_iter
def prepareFrame(frame, fnum):
frame = frame.copy()
frame['frame'] = fnum
return frame
return linker((prepareFrame(fr, fn) for fn, fr in points),
search_range,
memory=memory,
neighbor_strategy='KDTree',
link_strategy='auto',
retain_index=True)
# An entire tracking pipeline, including storage to disk
def track2disk(imgfilenames, outfilename, params, selectframes=None,
window=None, progress=False, statusfile=None):
"""Implements a complete tracking process, from image files to a complete
pytables (HDF5) database on disk.
Appropriate for large datasets.
'imgfilenames' is the complete list of image files.
'outfilename' conventionally has the ".h5" extension.
See module docs for 'params' and 'window'.
'selectframes' is a list of frame numbers to use, COUNTING FROM 1. Default is all.
If 'progress', a status message will be displayed in IPython.
'statusfile' optionally creates a JSON file that is continually updated with status
information.
NOTE: track.imread() is used to read the image files. This does not always behave
as the more familiar imread() in pylab.
"""
try: # Always close output file
outfile = None
if os.path.exists(outfilename): # Check now *and* later
raise IOError('Output file already exists.')
filepairs_all = [(i + 1, filename) for i, filename in enumerate(imgfilenames)]
if selectframes is None:
filepairs = filepairs_all
else:
filepairs = [filepairs_all[i - 1] for i in selectframes]
if statusfile is not None:
stopwatch = Stopwatch()
statfile = StatusFile(statusfile,
dict(totalframes=len(filepairs), outfile=outfilename,
working_dir=os.getcwd(), process_id=os.getpid(),
started=stopwatch.started))
statfile.update(dict(status='starting'))
tracks_iter = link_dataframes(feature_iter(filepairs, params, window=window),
params)
for loopcount, ((fnum, filename), ftr) in enumerate(itertools.izip(filepairs, tracks_iter)):
if statusfile is not None:
stopwatch.lap()
statfile.update(dict(status='working', mr_frame=fnum, mr_imgfile=filename,
nparticles=len(ftr), seconds_per_frame=stopwatch.mean_lap_time(),
elapsed_time=format_td(stopwatch.elapsed()),
time_left=format_td(stopwatch.estimate_completion(len(filepairs)))))
if progress:
import IPython.display
IPython.display.clear_output()
print '{} particles in frame {} ({} of {}): {}'.format(
len(ftr), fnum, loopcount+1, len(filepairs), filename)
sys.stdout.flush()
if outfile is None:
# We create the output file now so we can provide an estimate of total size.
if os.path.exists(outfilename):
raise IOError('Output file already exists.')
outfile = tables.openFile(outfilename, 'w')
alltracks = outfile.createTable('/', 'bigtracks', TrackPoint,
expectedrows=len(ftr) * len(imgfilenames),)
#filters=tables.Filters(complevel=5, complib='blosc'))
alltracks.append(
ftr[['frame', 'particle',
'x', 'y', 'intensity', 'rg2']].values.astype('float32'))
alltracks.flush()
if statusfile is not None:
statfile.update(dict(status='finishing',
elapsed_time=format_td(stopwatch.elapsed()),
seconds_per_frame=stopwatch.mean_lap_time()))
if outfile is not None:
_create_table_indices(alltracks)
finally:
if outfile is not None:
outfile.close()
if statusfile is not None:
statfile.update(dict(status='done',
elapsed_time=format_td(stopwatch.elapsed()),
seconds_per_frame=stopwatch.mean_lap_time()))
# Tracks file indexing
def create_tracksfile_indices(tracksfilename):
"""Create indices for the tracks data in the HDF5 file 'tracksfilename'.
Indices are necessary to efficiently access the data.
This is only necessary if the normal tracking process (with track2disk())
did not finish successfully.
"""
outfile = tables.openFile(tracksfilename, 'a')
try:
trtab = outfile.root.bigtracks
_create_table_indices(trtab)
finally:
outfile.close()
def _create_table_indices(trackstable):
"""Create indices on the tracks PyTables table."""
trackstable.cols.frame.createIndex()
trackstable.cols.particle.createIndex()
# Format of the tracks data file
class TrackPoint(tables.IsDescription):
"""pytables format for tracks data"""
frame = tables.Float32Col(pos=1)
particle = tables.Float32Col(pos=2)
x = tables.Float32Col(pos=3)
y = tables.Float32Col(pos=4)
intensity = tables.Float32Col(pos=5)
rg2 = tables.Float32Col(pos=6)
|
gpl-3.0
|
WhittKinley/aima-python
|
submissions/Sery/myNN.py
|
13
|
3375
|
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Sery import aids
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
aidsECHP = DataFrame()
aidsECHP.data = []
target_data = []
list_of_report = aids.get_reports()
for record in list_of_report:
try:
prevalence = float(record['Data']["HIV Prevalence"]["Adults"])
target_data.append(prevalence)
year = int(record['Year'])
living = int(record['Data']["People Living with HIV"]["Adults"])
new = int(record['Data']["New HIV Infections"]["Adults"])
deaths = int(record['Data']["AIDS-Related Deaths"]["Adults"])
aidsECHP.data.append([year, living, new, deaths])
except:
traceback.print_exc()
aidsECHP.feature_names = [
'Year',
'People Living with HIV',
'New HIV Infections',
'AIDS-Related Deaths',
]
'''
Build the target list,
one entry for each row in the input frame.
The MLP neural network used here is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking the adult HIV prevalence % into two
arbitrary segments.
'''
aidsECHP.target = []
def aidsTarget(percentage):
if percentage > 6:
return 1
return 0
for pre in target_data:
# choose the target
tt = aidsTarget(pre)
aidsECHP.target.append(tt)
aidsECHP.target_names = [
'HIV Prevalence <= 6%',
'HIV Prevalence > 6%',
]
'''
Make a custom classifier.
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (120,),
activation = 'relu',
solver='sgd', # 'adam',
alpha = 0.00001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1200, # 200,
shuffle = True,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
# momentum = 0.9,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
# beta_1 = 0.9,
# beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
aidsScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(aidsECHP.data)
aidsScaled.data = scaleGrid(aidsECHP.data)
aidsScaled.feature_names = aidsECHP.feature_names
aidsScaled.target = aidsECHP.target
aidsScaled.target_names = aidsECHP.target_names
Examples = {
'AidsDefault': {
'frame': aidsECHP,
},
'AidsSGD': {
'frame': aidsECHP,
'mlpc': mlpc
},
'AidsScaled': {
'frame': aidsScaled,
},
}
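# Illustration only (not part of the submission): a minimal, hypothetical way
# to fit the classifier configured above on the scaled data. The train/test
# split and scoring below are assumptions for demonstration, guarded so that
# nothing runs when the grading harness imports this module.
if __name__ == '__main__':
    from sklearn.model_selection import train_test_split
    X_train, X_test, y_train, y_test = train_test_split(
        aidsScaled.data, aidsScaled.target, test_size=0.25, random_state=0)
    mlpc.fit(X_train, y_train)
    print(mlpc.score(X_test, y_test))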
|
mit
|
prabhatsaini91/NLP-TA
|
Sentiment Analysis/BagOfWords.py
|
1
|
3751
|
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
import pandas as pd
import numpy as np
if __name__ == '__main__':
train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0, \
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t", \
quoting=3 )
#print 'The first review is:'
#print train["review"][0]
#raw_input("Press Enter to continue...")
#print 'Download text data sets. If you already have NLTK datasets downloaded, just close the Python download window...'
#nltk.download() # Download text data sets, including stop words
# Initialize an empty list to hold the clean reviews
clean_train_reviews = []
# Loop over each review; create an index i that goes from 0 to the length
# of the movie review list
print "Cleaning and parsing the training set movie reviews...\n"
for i in xrange( 0, len(train["review"])):
clean_train_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(train["review"][i], True)))
# ****** Create a bag of words from the training set
#
print "Creating the bag of words...\n"
# Initialize the "CountVectorizer" object, which is scikit-learn's
# bag of words tool.
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
train_data_features = vectorizer.fit_transform(clean_train_reviews)
# Numpy arrays are easy to work with, so convert the result to an
# array
train_data_features = train_data_features.toarray()
# ******* Train a random forest using the bag of words
#
print "Training the random forest (this may take a while)..."
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators = 100)
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
#
# This may take a few minutes to run
forest = forest.fit( train_data_features, train["sentiment"] )
# Create an empty list and append the clean reviews one by one
clean_test_reviews = []
print "Cleaning and parsing the test set movie reviews...\n"
for i in xrange(0,len(test["review"])):
clean_test_reviews.append(" ".join(KaggleWord2VecUtility.review_to_wordlist(test["review"][i], True)))
# Get a bag of words for the test set, and convert to a numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# Use the random forest to make sentiment label predictions
print "Predicting test labels...\n"
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv(os.path.join(os.path.dirname(__file__), 'data', 'Bag_of_Words_model.csv'), index=False, quoting=3)
print "Wrote results to Bag_of_Words_model.csv"
|
mit
|
aewhatley/scikit-learn
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
3369
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the points' positions.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifold. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
|
bsd-3-clause
|
hrjn/scikit-learn
|
sklearn/tests/test_kernel_approximation.py
|
78
|
7586
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
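# additive chi-squared kernel: k(x, y) = sum_i 2 * x_i * y_i / (x_i + y_i)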
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that SkewedChi2Sampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
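# per feature, the skewed chi-squared kernel is
#   k_i(x, y) = 2 * sqrt(x_i + c) * sqrt(y_i + c) / ((x_i + c) + (y_i + c)),
# so log k_i = log(x_i + c)/2 + log(y_i + c)/2 + log(2) - log((x_i + c) + (y_i + c)),
# which is the expression summed over features below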
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
# the transform of the duplicated (singular-kernel) data should still be finite
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
|
bsd-3-clause
|
andrewnc/scikit-learn
|
sklearn/tests/test_calibration.py
|
213
|
12219
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
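# Platt scaling: P(y=1 | f) = 1 / (1 + exp(A * f + B)), evaluated below with
# the LibSVM coefficients above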
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
|
bsd-3-clause
|
evanbiederstedt/RRBSfun
|
scripts/PDR_methyl_normal_mcell_by_stack1.py
|
1
|
2033
|
import glob
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import os
os.chdir('/Users/evanbiederstedt/Downloads/RRBS_data_files')
# set glob subdirectory via cell batch
mcells = glob.glob("RRBS_NormalBCD19pCD27mcell*")
for filename in mcells:
df = pd.read_table(filename)
df = df.drop(['chr', 'start', 'strand', 'avgWeightedEnt', 'CpGEntropy', 'tss', 'tssDistance', 'genes', 'exons',
'introns', 'promoter', 'cgi',
'geneDensity', 'ctcfUpstream', 'ctcfDownstream','ctcfDensity', 'geneDistalRegulatoryModules',
'vistaEnhancers', '3PrimeUTR', 'ctcfUpDistance', 'ctcfDownDistance','3PrimeUTRDistance',
'5PrimeUTR', '5PrimeUTRDistance', 'firstExon','geneDistalRegulatoryModulesK562',
'geneDistalRegulatoryModulesK562Distance', 'hypoInHues64','hypoInHues64Distance',
'genesDistance', 'exonsDistance', 'intronsDistance', 'promoterDistance', 'cgiDistance',
'ctcf', 'ctcfDistance', 'geneDistalRegulatoryModulesDistance', 'vistaEnhancersDistance', 'firstExonDistance'], axis=1)
num_bins2 = int(np.ceil(df['avgReadCpGs'].max()/1.25))  # pd.cut expects an integer bin count
df['avgReadCpGs_binned'] = pd.cut(df['avgReadCpGs'], num_bins2, labels=False)
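# label consecutive runs of identical bin values: the comparison is True
# wherever the binned value changes from the previous row, and the cumulative
# sum of those change points gives each run its own stack ID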
df['read_stack_ID'] = (df.avgReadCpGs_binned.shift(1) != df.avgReadCpGs_binned).astype(int).cumsum()
df['total_reads'] = df[["methReadCount", "unmethReadCount", "mixedReadCount"]].sum(axis=1)
df['avgReadCpGs'] = df['avgReadCpGs'].values.round(decimals=0)
df1 = df.groupby(['read_stack_ID', 'avgReadCpGs'])[['thisMeth', 'thisUnmeth', 'methReadCount', 'unmethReadCount', 'mixedReadCount', 'total_reads']].sum()
df1.reset_index(inplace=True)
df1["methylation"] = df1["thisMeth"]/(df1["thisMeth"]+df1["thisUnmeth"]) # corrected
df1["PDR_per_stack"] = df1["mixedReadCount"]/df1["total_reads"]
df1.to_csv(str("stacked_") + str(filename) +str(".csv"))
|
mit
|
EPFL-LCSB/pytfa
|
docker/mplimporthook.py
|
16
|
1654
|
"""Startup script for IPython kernel.
Installs an import hook to configure the matplotlib backend on the fly.
Originally from @minrk at
https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
Repurposed for docker-stacks to address repeat bugs like
https://github.com/jupyter/docker-stacks/issues/235.
"""
import sys
from IPython import get_ipython
class MatplotlibFinder(object):
"""Import hook that notices when matplotlib.pyplot or pylab is imported
and tries to configure the matplotlib backend appropriately for the
environment.
"""
_called = False
def find_module(self, fullname, path=None):
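# Legacy (PEP 302) finder protocol: returning None tells Python to keep
# searching with the other finders; this hook only observes the import and
# never supplies a loader itself.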
if self._called:
# already handled
return
if fullname not in ('pylab', 'matplotlib.pyplot'):
# not matplotlib
return
# don't call me again
self._called = True
try:
# remove myself from the import hooks
sys.meta_path = [loader for loader in sys.meta_path if loader is not self]
except ValueError:
pass
ip = get_ipython()
if ip is None:
# not in an interactive environment
return
if ip.pylab_gui_select:
# backend already selected
return
if hasattr(ip, 'kernel'):
# default to inline in kernel environments
ip.enable_matplotlib('inline')
else:
print('enabling matplotlib')
ip.enable_matplotlib()
# install the finder immediately
sys.meta_path.insert(0, MatplotlibFinder())
|
apache-2.0
|
jereze/scikit-learn
|
sklearn/metrics/metrics.py
|
233
|
1262
|
import warnings
warnings.warn("sklearn.metrics.metrics is deprecated and will be removed in "
"0.18. Please import from sklearn.metrics",
DeprecationWarning)
from .ranking import auc
from .ranking import average_precision_score
from .ranking import label_ranking_average_precision_score
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
|
bsd-3-clause
|
youprofit/scikit-image
|
doc/examples/plot_equalize.py
|
18
|
2786
|
"""
======================
Histogram Equalization
======================
This example enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.subplots_adjust(wspace=0.4)
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/indexes/timedeltas/test_timedelta_range.py
|
1
|
3093
|
import numpy as np
import pytest
from pandas import Timedelta, timedelta_range, to_timedelta
import pandas._testing as tm
from pandas.tseries.offsets import Day, Second
class TestTimedeltas:
def test_timedelta_range(self):
expected = to_timedelta(np.arange(5), unit="D")
result = timedelta_range("0 days", periods=5, freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(11), unit="D")
result = timedelta_range("0 days", "10 days", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day()
result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D")
tm.assert_index_equal(result, expected)
expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2)
result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
tm.assert_index_equal(result, expected)
expected = to_timedelta(np.arange(50), unit="T") * 30
result = timedelta_range("0 days", freq="30T", periods=50)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"periods, freq", [(3, "2D"), (5, "D"), (6, "19H12T"), (7, "16H"), (9, "12H")]
)
def test_linspace_behavior(self, periods, freq):
# GH 20976
result = timedelta_range(start="0 days", end="4 days", periods=periods)
expected = timedelta_range(start="0 days", end="4 days", freq=freq)
tm.assert_index_equal(result, expected)
assert result.freq == freq
def test_errors(self):
# not enough params
msg = (
"Of the four parameters: start, end, periods, and freq, "
"exactly three must be specified"
)
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(end="5 days")
with pytest.raises(ValueError, match=msg):
timedelta_range(periods=2)
with pytest.raises(ValueError, match=msg):
timedelta_range()
# too many params
with pytest.raises(ValueError, match=msg):
timedelta_range(start="0 days", end="5 days", periods=10, freq="H")
@pytest.mark.parametrize(
"start, end, freq, expected_periods",
[
("1D", "10D", "2D", (10 - 1) // 2 + 1),
("2D", "30D", "3D", (30 - 2) // 3 + 1),
("2s", "50s", "5s", (50 - 2) // 5 + 1),
# tests that worked before GH 33498:
("4D", "16D", "3D", (16 - 4) // 3 + 1),
("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1),
],
)
def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods):
# GH 33498 only the cases where `(end % freq) == 0` used to fail
res = timedelta_range(start=start, end=end, freq=freq)
assert Timedelta(start) == res[0]
assert Timedelta(end) >= res[-1]
assert len(res) == expected_periods
|
bsd-3-clause
|
aayushidwivedi01/spark-tk
|
regression-tests/sparktkregtests/testcases/dicom/dicom_extract_keyword_test.py
|
13
|
7033
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.extract_keywords functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
from lxml import etree
class DicomExtractKeywordsTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomExtractKeywordsTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.count = self.dicom.metadata.count()
def test_extract_one_column_basic(self):
"""test extract keyword with one col"""
self.dicom.extract_keywords(["PatientID"])
# ensure column was added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
# compare expected results with extract_keywords result
expected_result = self._get_expected_column_data_from_xml(["PatientID"])
take_result = self.dicom.metadata.take(self.count, columns=['PatientID'])
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_multiple_columns_basic(self):
"""test extract keywords with mult cols"""
keywords = ["PatientID", "SOPInstanceUID"]
self.dicom.extract_keywords(keywords)
# ensure columns were added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
if u'SOPInstanceUID' not in columns:
raise Exception("SOPInstanceUID not added to columns")
# compare expected and actual result
expected_result = self._get_expected_column_data_from_xml(keywords)
take_result = self.dicom.metadata.take(self.count, columns=keywords)
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_invalid_column(self):
"""test extract keyword with invalid column"""
self.dicom.extract_keywords(["invalid"])
# ensure column was added
columns = self.dicom.metadata.column_names
if u'invalid' not in columns:
raise Exception("Invalid column not added")
# compare expected and actual result
invalid_column = self.dicom.metadata.take(self.count, columns=[u'invalid'])
expected_result = [[None] for x in range(0, self.count)]
self.assertEqual(invalid_column, expected_result)
def test_extract_multiple_invalid_columns(self):
"""test extract keyword mult invalid cols"""
keywords = ["invalid", "another_invalid_col"]
self.dicom.extract_keywords(keywords)
# test that columns were added
columns = self.dicom.metadata.column_names
if u'invalid' not in columns:
raise Exception("invalid column not added to columns")
if u'another_invalid_col' not in columns:
raise Exception("another_invalid_col not added to columns")
# compare actual with expected result
invalid_columns = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = [[None, None] for x in range(0, self.count)]
self.assertEqual(invalid_columns, expected_result)
def test_extract_invalid_valid_col_mix(self):
keywords = ["PatientID", "Invalid"]
self.dicom.extract_keywords(keywords)
# test that columns were added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
if u'Invalid' not in columns:
raise Exception("Invalid not added to columns")
# compare actual with expected result
take_result = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = self._get_expected_column_data_from_xml(keywords)
numpy.testing.assert_equal(take_result, expected_result)
def test_extract_invalid_type(self):
with self.assertRaisesRegexp(Exception, "should be either str or list"):
self.dicom.extract_keywords(1)
def test_extract_unicode_columns(self):
keywords = [u'PatientID']
self.dicom.extract_keywords(keywords)
# test that column was added
columns = self.dicom.metadata.column_names
if u'PatientID' not in columns:
raise Exception("PatientID not added to columns")
# compare actual with expected result
take_result = self.dicom.metadata.take(self.count, columns=keywords)
expected_result = self._get_expected_column_data_from_xml(keywords)
numpy.testing.assert_equal(take_result, expected_result)
def _get_expected_column_data_from_xml(self, tags):
# generate expected data by extracting the keywords ourselves
expected_column_data = []
# download to pandas for easy access
metadata_pandas = self.dicom.metadata.to_pandas()
# iterate through the metadata rows
for index, row in metadata_pandas.iterrows():
# convert metadata to ascii string
metadata = row["metadata"].encode("ascii", "ignore")
# create a lxml tree object from xml metadata
xml_root = etree.fromstring(metadata)
expected_row = []
for tag in tags:
# for lxml the search query means
# look for all DicomAttribute elements with
# attribute keyword equal to our keyword
# then get the value element underneath it and extract the
# inner text
tag_query = ".//DicomAttribute[@keyword='" + tag + "']/Value/text()"
query_result = xml_root.xpath(tag_query)
# if result is [] use None, otherwise format in unicode
result = query_result[0].decode("ascii", "ignore") if query_result else None
expected_row.append(result)
#expected_row.append(query_result)
expected_column_data.append(expected_row)
return expected_column_data
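# Hypothetical standalone sketch (editor's addition, not part of this test
# module): shows the lxml XPath query used in _get_expected_column_data_from_xml
# on a toy, made-up DicomAttribute fragment.
def _xpath_keyword_sketch():
    toy_xml = (
        b"<NativeDicomModel>"
        b"<DicomAttribute keyword='PatientID'>"
        b"<Value number='1'>pat-001</Value>"
        b"</DicomAttribute>"
        b"</NativeDicomModel>"
    )
    root = etree.fromstring(toy_xml)
    # returns ['pat-001']; an unknown keyword would return []
    return root.xpath(".//DicomAttribute[@keyword='PatientID']/Value/text()")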
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
mayblue9/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
bsd-3-clause
|
mhue/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
142
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
|
bsd-3-clause
|
hsiaoyi0504/scikit-learn
|
sklearn/covariance/tests/test_robust_covariance.py
|
213
|
3359
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
|
bsd-3-clause
|
lurenlym/BPnetwork_smartcar
|
computer/computer_mlp_training.py
|
1
|
3490
|
__author__ = 'Mind'
import cv2
import numpy as np
import glob
import matplotlib.pyplot as plt
print('Loading training data...')
e0 = cv2.getTickCount()
# load training data
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 11), 'float')
training_data = glob.glob('training_data_temptest0407.npz')
for single_npz in training_data:
with np.load(single_npz) as data:
print(data.files)
train_temp = data['train']
train_labels_temp = data['train_labels']
print(train_temp.shape)
print(train_labels_temp.shape)
image_array = np.vstack((image_array, train_temp))
label_array = np.vstack((label_array, train_labels_temp))
train = image_array[1:, :]
train_labels = label_array[1:, :]
print(train.shape)
print(train_labels.shape)
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 11), 'float')
training_data = glob.glob('training_data_temptest0408.npz')
for single_npz in training_data:
with np.load(single_npz) as data:
print(data.files)
train_temp = data['train']
train_labels_temp = data['train_labels']
print(train_temp.shape)
print(train_labels_temp.shape)
image_array = np.vstack((image_array, train_temp))
label_array = np.vstack((label_array, train_labels_temp))
test = image_array[1:, :]
test_labels = label_array[1:, :]
print(test.shape)
print(test_labels.shape)
e00 = cv2.getTickCount()
time0 = (e00 - e0)/ cv2.getTickFrequency()
print('Loading image duration:%f'%time0)
# set start time
e1 = cv2.getTickCount()
# create MLP
layer_sizes = np.int32([38400, 50, 11]) #250:92.40 200:86.38 50:81.93
model = cv2.ml.ANN_MLP_create()
model.setLayerSizes(layer_sizes)
model.setTrainMethod(cv2.ml.ANN_MLP_BACKPROP)
model.setBackpropMomentumScale(0.01)
model.setBackpropWeightScale(0.1)
model.setTermCriteria((cv2.TERM_CRITERIA_COUNT| cv2.TERM_CRITERIA_EPS, 50, 0.001))
model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 0.2, 1)
#model.setActivationFunction(cv2.ml.ANN_MLP_SIGMOID_SYM, 0.5, 1)
print('Training MLP ...')
num_iter = model.train(np.float32(train), cv2.ml.ROW_SAMPLE, np.float32(train_labels))
# set end time
e2 = cv2.getTickCount()
time = (e2 - e1)/cv2.getTickFrequency()
print('Training duration:%f'% time)
# save param
model.save('mlp_mlp2.xml')
#print('Ran for %d iterations'% num_iter)
ret, resp = model.predict(train)
prediction = resp.argmax(-1)
#print('Prediction:', prediction)
true_labels = train_labels.argmax(-1)
#print('True labels:', true_labels)
number = prediction.shape[0]
print('Testing...')
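# The two extra terms below also count a prediction as correct when it is off
# by exactly one label index - a tolerance chosen by the original author.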
train_rate0 = np.mean(prediction == true_labels)
train_rate1 = np.mean(prediction == true_labels+1)
train_rate2 = np.mean(prediction == true_labels-1)
train_rate = train_rate0 + train_rate1 + train_rate2
print('Train rate: %f:' % (train_rate0*100))
print('Train rate: %f:' % (train_rate*100))
plt.plot(range(number), prediction, 'b')
plt.plot(range(number), true_labels, 'r')
ret2, resp2 = model.predict(test)
prediction2 = resp2.argmax(-1)
#print('Prediction:', prediction)
true_test_labels = test_labels.argmax(-1)
#print('True labels:', true_labels)
number = prediction2.shape[0]
print('Testing...')
test_rate0 = np.mean(prediction2 == true_test_labels)
test_rate1 = np.mean(prediction2 == true_test_labels+1)
test_rate2 = np.mean(prediction2 == true_test_labels-1)
test_rate = test_rate0 + test_rate1 + test_rate2
print('Test rate: %f:' % (test_rate0*100))
print('Test rate: %f:' % (test_rate*100))
plt.show()
|
apache-2.0
|
rcurtin/shogun
|
examples/undocumented/python_modular/graphical/regression_gaussian_process_demo.py
|
16
|
9323
|
###########################################################################
# Mean prediction from Gaussian Processes based on
# classifier_libsvm_minimal_modular.py
# plotting functions have been adapted from the pyGP library
# https://github.com/jameshensman/pyGP
###########################################################################
from numpy import *
from numpy.random import randn
from modshogun import *
import pylab as PL
import matplotlib
import logging as LG
import scipy as SP
from modshogun import GradientModelSelection
from modshogun import ModelSelectionParameters, R_EXP, R_LINEAR
from modshogun import ParameterCombination
def plot_training_data(x, y,
shift=None,
replicate_indices=None,
format_data={'alpha':.5,
'marker':'.',
'linestyle':'--',
'lw':1,
'markersize':9},
draw_arrows=0,
plot_old=False):
"""
Plot training data input x and output y into the
active figure (See http://matplotlib.sourceforge.net/ for details of figure).
Instance plot without replicate groups:
.. image:: ../images/plotTraining.png
:height: 8cm
Instance plot with two replicate groups and a shift in x-coords:
.. image:: ../images/plotTrainingShiftX.png
:height: 8cm
**Parameters:**
x : [double]
Input x (e.g. time).
y : [double]
Output y (e.g. expression).
shift : [double]
The shift of each replicate group.
replicate_indices : [int]
Indices of replicates for each x, respectively
format_data : {format}
Format of the data points. See http://matplotlib.sourceforge.net/ for details.
draw_arrows : int
draw given number of arrows (if greater than len(replicate), draw all arrows).
Arrows will show the time shift for time points, respectively.
"""
x_shift = SP.array(x.copy())
if shift is not None and replicate_indices is not None:
assert len(shift) == len(SP.unique(replicate_indices)), 'Need one shift per replicate to plot properly'
_format_data = format_data.copy()
if(format_data.has_key('alpha')):
_format_data['alpha'] = .2*format_data['alpha']
else:
_format_data['alpha'] = .2
number_of_groups = len(SP.unique(replicate_indices))
for i in SP.unique(replicate_indices):
x_shift[replicate_indices == i] -= shift[i]
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
_format_data['color'] = col
if(plot_old):
PL.plot(x[replicate_indices == i], y[replicate_indices == i], **_format_data)
if(draw_arrows):
range = SP.where(replicate_indices == i)[0]
for n in SP.arange(range[0], range[-1], max(1, round(len(range) / draw_arrows))):
offset = round((len(range)-1) / draw_arrows)
n += max(int((i+1)*offset/number_of_groups),1)
PL.text((x_shift[n]+x[n])/2., y[n],
"%.2f"%(-shift[i]),
ha='center',va='center',
fontsize=10)
PL.annotate('', xy=(x_shift[n], y[n]),
xytext=(x[n], y[n]),va='center',
arrowprops=dict(facecolor=col,
alpha=.2,
shrink=.01,
frac=.2,
headwidth=11,
width=11))
#PL.plot(x,y,**_format_data)
if(replicate_indices is not None):
number_of_groups = len(SP.unique(replicate_indices))
#format_data['markersize'] = 13
#format_data['alpha'] = .5
for i in SP.unique(replicate_indices):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x_shift[replicate_indices == i], y[replicate_indices == i], **format_data)
else:
print(x_shift.shape)
number_of_groups = x_shift.shape[0]
for i in xrange(number_of_groups):
col = matplotlib.cm.jet(i / (2. * number_of_groups))
format_data['color'] = col
PL.plot(x[i], y[i], **format_data)
# return PL.plot(x_shift,y,**format_data)
def plot_sausage(X, mean, std, alpha=None, format_fill={'alpha':0.3, 'facecolor':'k'}, format_line=dict(alpha=1, color='g', lw=3, ls='dashed')):
"""
Plot a sausage plot of the GP, i.e.:
.. image:: ../images/sausage.png
:height: 8cm
**returns:** : [fill_plot, line_plot]
The fill and the line of the sausage plot. (i.e. green line and gray fill of the example above)
**Parameters:**
X : [double]
Interval X for which the sausage shall be plotted.
mean : [double]
The mean to be plotted.
std : [double]
Pointwise standard deviation.
format_fill : {format}
The format of the fill. See http://matplotlib.sourceforge.net/ for details.
format_line : {format}
The format of the mean line. See http://matplotlib.sourceforge.net/ for details.
"""
X = X.squeeze()
Y1 = (mean + 2 * std)
Y2 = (mean - 2 * std)
if(alpha is not None):
old_alpha_fill = min(1, format_fill['alpha'] * 2)
for i, a in enumerate(alpha[:-2]):
format_fill['alpha'] = a * old_alpha_fill
hf = PL.fill_between(X[i:i + 2], Y1[i:i + 2], Y2[i:i + 2], lw=0, **format_fill)
i += 1
hf = PL.fill_between(X[i:], Y1[i:], Y2[i:], lw=0, **format_fill)
else:
hf = PL.fill_between(X, Y1, Y2, **format_fill)
hp = PL.plot(X, mean, **format_line)
return [hf, hp]
class CrossRect(matplotlib.patches.Rectangle):
def __init__(self, *args, **kwargs):
matplotlib.patches.Rectangle.__init__(self, *args, **kwargs)
#self.ax = ax
# def get_verts(self):
# rectverts = matplotlib.patches.Rectangle.get_verts(self)
# return verts
def get_path(self, *args, **kwargs):
old_path = matplotlib.patches.Rectangle.get_path(self)
verts = []
codes = []
for vert, code in old_path.iter_segments():
verts.append(vert)
codes.append(code)
verts.append([1, 1])
codes.append(old_path.LINETO)
new_path = matplotlib.artist.Path(verts, codes)
return new_path
def create_toy_data():
#0. generate Toy-Data; just samples from a superposition of a sin + linear trend
xmin = 1
xmax = 2.5*SP.pi
x = SP.arange(xmin,xmax,(xmax-xmin)/100.0)
C = 2 #offset
sigma = 0.5
b = 0
y = b*x + C + 1*SP.sin(x)
# dy = b + 1*SP.cos(x)
y += sigma*random.randn(y.shape[0])
y-= y.mean()
x = x[:,SP.newaxis]
return [x,y]
def run_demo():
LG.basicConfig(level=LG.INFO)
random.seed(572)
#1. create toy data
[x,y] = create_toy_data()
feat_train = RealFeatures(transpose(x));
labels = RegressionLabels(y);
n_dimensions = 1
#2. location of unispaced predictions
X = SP.linspace(0,10,10)[:,SP.newaxis]
#new interface with likelihood parameters being decoupled from the covariance function
likelihood = GaussianLikelihood()
covar_parms = SP.log([2])
hyperparams = {'covar':covar_parms,'lik':SP.log([1])}
#construct covariance function
SECF = GaussianKernel(feat_train, feat_train,2)
covar = SECF
zmean = ZeroMean();
inf = ExactInferenceMethod(SECF, feat_train, zmean, labels, likelihood);
gp = GaussianProcessRegression(inf, feat_train, labels);
root=ModelSelectionParameters();
c1=ModelSelectionParameters("inference_method", inf);
root.append_child(c1);
c2 = ModelSelectionParameters("scale");
c1.append_child(c2);
c2.build_values(0.01, 4.0, R_LINEAR);
c3 = ModelSelectionParameters("likelihood_model", likelihood);
c1.append_child(c3);
c4=ModelSelectionParameters("sigma");
c3.append_child(c4);
c4.build_values(0.001, 4.0, R_LINEAR);
c5 =ModelSelectionParameters("kernel", SECF);
c1.append_child(c5);
c6 =ModelSelectionParameters("width");
c5.append_child(c6);
c6.build_values(0.001, 4.0, R_LINEAR);
crit = GradientCriterion();
grad=GradientEvaluation(gp, feat_train, labels,
crit);
grad.set_function(inf);
gp.print_modsel_params();
root.print_tree();
grad_search=GradientModelSelection(
root, grad);
grad.set_autolock(0);
best_combination=grad_search.select_model(1);
gp.set_return_type(GaussianProcessRegression.GP_RETURN_COV);
St = gp.apply_regression(feat_train);
St = St.get_labels();
gp.set_return_type(GaussianProcessRegression.GP_RETURN_MEANS);
M = gp.apply_regression();
M = M.get_labels();
#create plots
plot_sausage(transpose(x),transpose(M),transpose(SP.sqrt(St)));
plot_training_data(x,y);
PL.show();
if __name__ == '__main__':
run_demo()
|
gpl-3.0
|
rjferrier/fluidity
|
examples/hokkaido-nansei-oki_tsunami/plotgages.py
|
5
|
2682
|
#!/usr/bin/env python
from fluidity_tools import stat_parser
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, show
import getopt
import sys
import csv
import tools
def usage():
print "plotgages.py --file=filename.detectors"
def main(argv=None):
mea_filename='raw_data/WaveGages.csv'
# Offsets:
surface_height=-0.2
d0=0.0005
offset=-surface_height-d0
try:
opts, args = getopt.getopt(sys.argv[1:], "", ['file='])
except getopt.GetoptError:
print "Getopterror :("
usage()
sys.exit(2)
filename=''
for opt, arg in opts:
if opt == '--file':
filename=arg
elif opt=='-h' or opt=='--help':
usage()
sys.exit(2)
if filename=='':
print 'No filename specified. You have to supply the detectors filename'
usage()
sys.exit(2)
print "Generating plots"
s = stat_parser(filename)
timesteps=s["ElapsedTime"]["value"]
timestep=timesteps[1]-timesteps[0]
print "Found ", len(timesteps), " timesteps with dt=", timestep, " starting at t0=", timesteps[0]-timestep
fs=s["water"]["FreeSurface"]
print "Found ", len(fs), " free surface detectors."
# fill in measurement data
mea_gauge1=[]
mea_gauge2=[]
mea_gauge3=[]
for i in range(0, len(timesteps)):
gauges=tools.get_measurement(mea_filename, timesteps[i])
mea_gauge1.append(gauges[0])
mea_gauge2.append(gauges[1])
mea_gauge3.append(gauges[2])
plt.ion() # switch in interactive mode
fig1= figure()
# fig2 = figure()
# fig3 = figure()
subplt1 = fig1.add_subplot(311, title='Gauge 1', xlabel='Time [s]', ylabel='Free surface [cm]')
subplt2 = fig1.add_subplot(312, title='Gauge 2', xlabel='Time [s]', ylabel='Free surface [cm]')
subplt3 = fig1.add_subplot(313, title='Gauge 3', xlabel='Time [s]', ylabel='Free surface [cm]')
subplt1.plot(timesteps, s["water"]["FreeSurface"]["gauge1"]+offset, label='ICOM') # plot gauge1 detector data
subplt1.plot(timesteps, mea_gauge1, label='Experimental data') # plot gauge1 measurement data
subplt2.plot(timesteps, s["water"]["FreeSurface"]["gauge2"]+offset, label='ICOM') # plot gauge2 detector data
subplt2.plot(timesteps, mea_gauge2, label='Experimental data') # plot gauge2 measurement data
subplt3.plot(timesteps, s["water"]["FreeSurface"]["gauge3"]+offset, label='ICOM') # plot gauge3 detector data
subplt3.plot(timesteps, mea_gauge3, label='Experimental data') # plot gauge3 measurement data
# subplt1.xlabel('Time [s]')
subplt1.legend()
plt.draw()
# for i in range(timesteps):
# gauge1.append(s["water"]["FreeSurface"]["gauge1"])
raw_input("Press Enter to exit")
if __name__ == "__main__":
main()
|
lgpl-2.1
|
guildai/guild
|
guild/commands/check_impl.py
|
1
|
16415
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import json
import logging
import os
import platform
import re
import subprocess
import sys
import warnings
import click
import pkg_resources
import guild
from guild import _test
from guild import cli
from guild import config
from guild import file_util
from guild import plugin
from guild import uat
from guild import util
from guild import var
from . import remote_impl_support
log = logging.getLogger("guild")
# (mod_name, required_flag)
CHECK_MODS = [
("click", True),
("dask", False),
("distutils", True),
("numpy", True),
("pandas", False),
("pip", True),
("sklearn", True),
("skopt", True),
("setuptools", True),
("twine", False),
("yaml", True),
("werkzeug", True),
]
ATTR_COL_WIDTH = 26
class Check(object):
def __init__(self, args):
self.args = args
self._errors = False
self.offline = self._init_offline(args)
self.newer_version_available = False
@staticmethod
def _init_offline(args):
if args.offline is not None:
return args.offline
else:
return _check_config().get("offline")
def error(self):
self._errors = True
@property
def has_error(self):
return self._errors
def _check_config():
return config.user_config().get("check") or {}
def main(args):
if args.remote:
remote_impl_support.check(args)
else:
_check(args)
def _check(args):
try:
_check_impl(args)
except SystemExit as e:
_maybe_notify(args, e)
raise
else:
_maybe_notify(args)
def _check_impl(args):
if args.version:
_check_version(args.version)
if args.uat:
_uat_and_exit()
check = Check(args)
if not args.no_info:
_print_info(check)
if args.all_tests or args.tests:
_run_tests(check)
if check.has_error:
_print_error_and_exit(args)
def _check_version(req):
try:
match = guild.test_version(req)
except ValueError:
cli.error(
"invalid requirement spec '%s'\n"
"See https://bit.ly/guild-help-req-spec for more information." % req
)
else:
if not match:
cli.error(
"version mismatch: current version '%s' does not match '%s'"
% (guild.__version__, req)
)
def _uat_and_exit():
os.environ["NO_IMPORT_FLAGS_PROGRESS"] = "1"
uat.run()
sys.exit(0)
def _run_tests(check):
with util.Env(
{
"NO_IMPORT_FLAGS_PROGRESS": "1",
"COLUMNS": "999",
"SYNC_RUN_OUTPUT": "1",
# The following are optimizations for tests. They must be
# overridden for any tests that check the disabled behavior.
"NO_PIP_FREEZE": "1",
"NO_VCS_COMMIT": "1",
}
):
_run_tests_(check)
def _run_tests_(check):
if check.args.all_tests:
if check.args.tests:
log.warning(
"running all tests (--all-tests specified) - "
"ignoring individual tests"
)
success = _test.run_all(skip=check.args.skip)
elif check.args.tests:
if check.args.skip:
log.warning("running individual tests - ignoring --skip")
success = _test.run(check.args.tests)
if not success:
check.error()
def _print_info(check):
_print_guild_info(check)
_print_python_info(check)
if not check.args.env:
_print_platform_info(check)
_print_psutil_info(check)
_print_tensorboard_info(check)
if check.args.tensorflow:
_print_tensorflow_info(check)
if check.args.pytorch:
_print_pytorch_info(check)
_print_cuda_info(check)
_print_nvidia_tools_info(check)
if check.args.verbose:
_print_mods_info(check)
_print_guild_latest_versions(check)
if check.newer_version_available:
_notify_newer_version()
if not check.args.env:
if check.args.space:
_print_disk_usage()
def _print_guild_info(check):
_attr("guild_version", _safe_apply(check, guild.version))
_attr("guild_install_location", _safe_apply(check, _guild_install_location))
_attr("guild_home", _safe_apply(check, config.guild_home))
_attr("guild_resource_cache", _safe_apply(check, _guild_resource_cache))
if not check.args.env:
_attr("installed_plugins", _safe_apply(check, _format_plugins))
def _attr(name, val):
cli.out("%s:%s%s" % (name, (ATTR_COL_WIDTH - len(name)) * " ", val))
def _safe_apply(check, f, *args, **kw):
"""Always return a string for application f(*args, **kw).
If f(*args, **kw) fails, returns a higlighted error message and
sets error flag on check.
"""
try:
return f(*args, **kw)
except Exception as e:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("safe call: %r %r %r", f, args, kw)
check.error()
return _warn("ERROR: %s" % e)
def _guild_install_location():
return pkg_resources.resource_filename("guild", "")
def _guild_resource_cache():
return util.realpath(var.cache_dir("resources"))
def _format_plugins():
names = set([name for name, _ in plugin.iter_plugins()])
return ", ".join(sorted(names))
def _print_python_info(check):
_attr("python_version", _safe_apply(check, _python_version))
_attr("python_exe", sys.executable)
if check.args.verbose:
_attr("python_path", _safe_apply(check, _python_path))
def _python_version():
return sys.version.replace("\n", "")
def _python_path():
return os.path.pathsep.join(sys.path)
def _print_platform_info(check):
_attr("platform", _safe_apply(check, _platform))
def _platform():
system, _node, release, _ver, machine, _proc = platform.uname()
return " ".join([system, release, machine])
def _print_psutil_info(check):
ver = _try_module_version("psutil", check)
_attr("psutil_version", ver)
def _print_tensorboard_info(check):
_attr("tensorboard_version", _safe_apply(check, _tensorboard_version, check))
def _tensorboard_version(check):
try:
import tensorboard.version as version
except ImportError:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("tensorboard version")
check.error() # TB is required
return _warn("not installed")
else:
return version.VERSION
def _print_tensorflow_info(check):
# Run externally to avoid tf logging to our stderr.
cmd = [sys.executable, "-um", "guild.commands.tensorflow_check_main"]
env = util.safe_osenv()
env["PYTHONPATH"] = os.path.pathsep.join(sys.path)
if check.args.verbose:
stderr = None
else:
stderr = open(os.devnull, "w")
p = subprocess.Popen(cmd, stderr=stderr, env=env)
exit_status = p.wait()
if exit_status != 0:
check.error()
def _print_pytorch_info(check):
torch = _try_import_torch()
if not torch:
_attr("pytorch_version", _warn("not installed"))
return
_attr("pytorch_version", _safe_apply(check, _torch_version, torch))
_attr("pytorch_cuda_version", _safe_apply(check, _torch_cuda_version, torch))
_attr("pytorch_cuda_available", _safe_apply(check, _torch_cuda_available, torch))
_attr("pytorch_cuda_devices", _safe_apply(check, _pytorch_cuda_devices, torch))
def _try_import_torch():
# pylint: disable=import-error
try:
import torch
import torch.version as _
except Exception:
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("try import torch")
return None
else:
return torch
def _torch_version(torch):
return torch.version.__version__
def _torch_cuda_version(torch):
return torch.version.cuda
def _torch_cuda_available(torch):
if torch.cuda.is_available():
return "yes"
else:
return "no"
def _pytorch_cuda_devices(torch):
if torch.cuda.device_count() == 0:
return "none"
return ", ".join(
"%s (%i)" % (torch.cuda.get_device_name(i), i)
for i in range(torch.cuda.device_count())
)
def _print_cuda_info(check):
_attr("cuda_version", _safe_apply(check, _cuda_version))
def _cuda_version():
version = util.find_apply([_cuda_version_nvcc, _cuda_version_nvidia_smi])
if not version:
return "not installed"
return version
def _cuda_version_nvcc():
nvcc = util.which("nvcc")
if not nvcc:
return None
try:
out = subprocess.check_output([nvcc, "--version"])
except subprocess.CalledProcessError as e:
return _warn("ERROR: %s" % e.output.strip())
else:
out = out.decode("utf-8")
m = re.search(r"V([0-9\.]+)", out, re.MULTILINE)
if m:
return m.group(1)
else:
log.debug("Unexpected output from nvcc: %s", out)
return "unknown (error)"
def _cuda_version_nvidia_smi():
nvidia_smi = util.which("nvidia-smi")
if not nvidia_smi:
return None
try:
out = subprocess.check_output([nvidia_smi, "--query"])
except subprocess.CalledProcessError as e:
return _warn("ERROR: %s" % e.output.strip())
else:
out = out.decode("utf-8")
m = re.search(r"CUDA Version\s+: ([0-9\.]+)", out, re.MULTILINE)
if m:
return m.group(1)
else:
log.debug("Unexpected output from nvidia-smi: %s", out)
return "unknown (error)"
def _print_nvidia_tools_info(check):
_attr("nvidia_smi_version", _safe_apply(check, _nvidia_smi_version))
def _nvidia_smi_version():
cmd = util.which("nvidia-smi")
if not cmd:
return "not installed"
try:
out = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
return _warn("ERROR: %s" % e.output.strip())
else:
out = out.decode("utf-8")
m = re.search(r"NVIDIA-SMI ([0-9\.]+)", out)
if m:
return m.group(1)
else:
log.debug("Unexpected output from nvidia-smi: %s", out)
return "unknown (error)"
def _print_mods_info(check):
for mod, required in CHECK_MODS:
ver = _try_module_version(mod, check, required)
_attr("%s_version" % mod, ver)
def _try_module_version(name, check, required=True, version_attr="__version__"):
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", RuntimeWarning)
mod = __import__(name)
except ImportError as e:
if required:
check.error()
if log.getEffectiveLevel() <= logging.DEBUG:
log.exception("import %s", name)
return _warn(_not_installed_msg(e))
else:
try:
ver = getattr(mod, version_attr)
except AttributeError:
return _warn("UNKNOWN")
else:
return _format_version(ver)
def _not_installed_msg(e):
if "No module named " in str(e):
return "not installed"
else:
return "not installed (%s)" % e
def _format_version(ver):
if isinstance(ver, tuple):
return ".".join([str(part) for part in ver])
else:
return str(ver)
def _print_guild_latest_versions(check):
if check.offline:
_attr("latest_guild_version", "unchecked (offline)")
else:
cur_ver = guild.__version__
latest_ver = _latest_version(check)
latest_ver_desc = latest_ver or "unknown (error)"
_attr("latest_guild_version", latest_ver_desc)
if latest_ver:
check.newer_version_available = _is_newer(latest_ver, cur_ver)
def _latest_version(check):
url = _latest_version_url(check)
log.debug("getting latest version from %s", url)
data = {
"guild-version": guild.__version__,
"python-version": _python_short_version(),
"platform": _platform(),
}
try:
resp = util.http_post(url, data, timeout=5)
except Exception as e:
log.debug("error reading latest version: %s", e)
return None
else:
if resp.status_code == 404:
log.debug("error reading latest version: %s not found" % url)
return None
if resp.status_code != 200:
log.debug("error reading latest version: %s" % resp.text)
return None
return _parse_latest_version(resp.text)
def _latest_version_url(check):
return _check_config().get("check-url") or check.args.check_url
def _python_short_version():
return sys.version.split(" ", 1)[0]
def _parse_latest_version(s):
try:
decoded = json.loads(s)
except Exception as e:
log.debug("error parsing latest version response %s: %s", s, e)
return None
else:
return decoded.get("latest-version", "unknown")
def _is_newer(latest, cur):
return pkg_resources.parse_version(latest) > pkg_resources.parse_version(cur)
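# For example (hypothetical version strings): _is_newer("0.7.10", "0.7.3") is
# True, because parse_version compares release segments numerically rather
# than lexicographically.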
def _notify_newer_version():
cli.out(
click.style(
"A newer version of Guild AI is available. Run "
"'pip install guildai --upgrade' to install it.",
bold=True,
),
err=True,
)
def _print_disk_usage():
cli.out("disk_space:")
paths = [
("guild_home", config.guild_home()),
("runs", var.runs_dir()),
("deleted_runs", var.runs_dir(deleted=True)),
("remote_state", var.remote_dir()),
("cache", var.cache_dir()),
]
formatted_disk_usage = [_formatted_disk_usage(path) for _name, path in paths]
max_disk_usage_width = max([len(s) for s in formatted_disk_usage])
for (name, path), disk_usage in zip(paths, formatted_disk_usage):
_attr(
" %s" % name,
_format_disk_usage_and_path(disk_usage, path, max_disk_usage_width),
)
def _formatted_disk_usage(path):
if os.path.exists(path):
size = file_util.disk_usage(path)
else:
size = 0
return util.format_bytes(size)
def _format_disk_usage_and_path(usage, path, max_usage_width):
return "%s%s%s" % (usage, " " * (max_usage_width - len(usage) + 1), path)
def _print_error_and_exit(args):
if args.all_tests or args.tests:
msg = _tests_failed_msg()
else:
msg = _general_error_msg(args)
cli.error(msg)
def _tests_failed_msg():
return "one or more tests failed - see above for details"
def _general_error_msg(args):
msg = (
"there are problems with your setup\n"
"Refer to the issues above for more information"
)
if not args.verbose:
msg += " or rerun check with the --verbose option."
return msg
def _warn(msg):
return click.style(msg, fg="red", bold=True)
def _maybe_notify(args, error=None):
if not args.notify:
return
notify_send = util.which("notify-send")
if not notify_send:
log.warning("cannot notify check result - notify-send not available")
return
summary, body, urgency = _notify_cmd_params(error)
cmd = ["notify-send", "-u", urgency, summary, body]
_ = subprocess.check_output(cmd)
def _notify_cmd_params(error):
from guild import main
summary = "guild check"
body = "PASSED"
urgency = "normal"
if error:
error_msg, code = main.system_exit_params(error)
# SystemExit errors are used for 0 exit codes, which are not
# actually errors.
if code != 0:
body = "FAILED (%s)" % code
if error_msg:
body += ": %s" % error_msg
urgency = "critical"
return summary, body, urgency
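# Hypothetical sketch (editor's addition, not part of guild): demonstrates the
# _safe_apply contract documented above - the return value is always a string
# and a failure flags the Check object. The _Args stub is made up for the sketch.
def _safe_apply_sketch():
    class _Args(object):
        offline = True  # minimal stand-in for the parsed CLI arguments
    check = Check(_Args())
    ok = _safe_apply(check, lambda: "fine")   # -> "fine"
    bad = _safe_apply(check, lambda: 1 / 0)   # -> styled "ERROR: division by zero"
    return ok, bad, check.has_error           # has_error is now True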
|
apache-2.0
|
camallen/aggregation
|
experimental/serengeti/IAAI/alg.py
|
2
|
1957
|
#!/usr/bin/env python
__author__ = 'greg'
from nodes import setup, speciesList
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr
numUser = [5,10,15,20,25]
algPercent = []
currPercent = []
for i in numUser:
print i
algPercent.append([])
currPercent.append([])
for j in range(20):
photos,users = setup(tau=1)
for p in photos.values():
p.__sample__(i)
for u in users.values():
u.__prune__()
#initialize things using majority voting
for p in photos.values():
p.__majorityVote__()
for k in range(1):
#estimate the user's "correctness"
for u in users.values():
for s in speciesList:
u.__speciesCorrect__(s,beta=0.01)
for p in photos.values():
p.__weightedMajorityVote__()
correct = 0
total = 0.
for p in photos.values():
if p.__goldStandardCompare__():
correct += 1
total += 1
algPercent[-1].append(correct/total)
# for p in photos.values():
# p.__currAlg__()
#
# correct = 0
# total = 0.
# for p in photos.values():
# if p.__goldStandardCompare__():
# correct += 1
# total += 1
#
# currPercent[-1].append(correct/total)
meanValues = [np.mean(p)*100 for p in algPercent]
std = [np.std(p)*100 for p in algPercent]
plt.errorbar(numUser, meanValues, yerr=std,fmt="-o",color="black")
#meanValues = [np.mean(p) for p in currPercent]
#std = [np.std(p) for p in currPercent]
#plt.errorbar(numUser, meanValues, yerr=std)
plt.plot([5,25],[96.4,96.4],"--", color="grey")
#plt.legend(("Our Algorithm","Current Algorithm"), "lower right")
plt.xlabel("Number of Users per Photo")
plt.ylabel("Accuracy (%)")
plt.xlim((4,26))
plt.ylim((93,100))
plt.show()
|
apache-2.0
|
kmike/scikit-learn
|
sklearn/manifold/isomap.py
|
6
|
7139
|
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD, (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_arrays
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically
'FW' : Floyd-Warshall algorithm
'D' : Dijkstra algorithm with Fibonacci Heaps
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
`embedding_` : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
`kernel_pca_` : object
`KernelPCA` object used to implement the embedding.
`training_data_` : array-like, shape (n_samples, n_features)
Stores the training data.
`nbrs_` : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
`dist_matrix_` : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
[1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X, = check_arrays(X, sparse_format='dense')
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
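# Isomap kernel: K = -0.5 * D**2; the double centering of this kernel
# is handled by KernelPCA with kernel="precomputed" during fit.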
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, cKDTree}
Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
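# Hypothetical usage sketch (editor's addition, not part of the scikit-learn
# module): embeds a small random point cloud with the Isomap class above.
def _isomap_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(50, 5)                                # 50 samples, 5 features
    model = Isomap(n_neighbors=5, n_components=2)
    X_2d = model.fit_transform(X)                      # shape (50, 2)
    return X_2d.shape, model.reconstruction_error()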
|
bsd-3-clause
|
tlhr/plumology
|
plumology/learn/som.py
|
1
|
12304
|
"""som - Self-organising-map"""
from typing import Optional, Tuple
import numpy as np
from sklearn.decomposition import PCA
class SOM:
"""
SOM - Self-Organising-Map.
A 2D neural network that clusters high-dimensional data iteratively.
Parameters
----------
nx : Number of neurons on x-axis.
ny : Number of neurons on y-axis.
ndims : Dimension of input data.
iterations : Total number of iterations to perform.
Should be at least 10 times the number of neurons.
learning_rate : The learning rate specifies the
tradeoff between speed and accuracy of the SOM.
distance : { 'euclidean', 'periodic' }
The distance metric to use.
init : { 'random', 'pca' }
Initialization method. "pca" uses a grid spanned by the first two
eigenvectors of the principal component analysis of the input data.
grid : { 'rect', 'hex' }
Layout of the SOM, can be either rectangular or hexagonal with
equidistant nodes. The latter can provide smoother visualization.
train : { 'seq', 'batch' }
Training algorithm to use. Sequential picks random feature vectors one
at a time, while batch mode trains using all features per iteration.
This can significantly speed up convergence.
neighbour : { 'gaussian', 'bubble', 'epanechnikov' }
Type of neighbourhood decay function to use. "bubble" uses a hard
cutoff, "gaussian" falls off smoothly, and "epanechnikov" starts
smoothly and ends with a hard cutoff.
learning : { 'exponential', 'power', 'linear' }
Type of decay for the learning rate. A linear decay can
improve results in certain cases.
seed : Seed for the random number generator.
Attributes
----------
grid : Grid with all x, y positiions of the nodes.
Useful for visualization.
weights : Weight vectors of the SOM in shape = (nx, ny, ndims).
Examples
--------
Here we train a 20 by 30 SOM on some colors:
>>> from plumology.learn import SOM
>>> som = SOM(20, 30, 3, iterations=400, learning_rate=0.2)
>>> colors = np.array(
... [[0., 0., 0.],
... [0., 0., 1.],
... [0., 1., 1.],
... [1., 0., 1.],
... [1., 1., 0.],
... [1., 1., 1.],
... [.33, .33, .33],
... [.5, .5, .5],
... [.66, .66, .66]]
... )
>>> som.fit(colors)
References
----------
Kohonen, T., "Self-Organized Formation of Topologically Correct
Feature Maps". In: Biological Cybernetics 43 (1): 59–69 (1982).
"""
def __init__(
self,
nx: int,
ny: int,
ndims: int,
iterations: int,
learning_rate: float=0.5,
distance: str='euclid',
init: str='random',
grid: str='rect',
train: str='seq',
neighbour: str='gaussian',
learning: str='exp',
seed: Optional[int]=None
) -> None:
self._iterations = iterations
self._init_learning_rate = learning_rate
self._learning_rate = self._init_learning_rate
self._ndims = ndims
self._map_radius = max(nx, ny) / 2
self._dlambda = self._iterations / np.log(self._map_radius)
self._shape = (nx, ny)
self._trained = False
if seed is not None:
np.random.seed(seed)
# Establish training algorithm
if train.startswith('seq'):
self._type = 's'
elif train.startswith('batch'):
self._type = 'b'
else:
e = 'Invalid training type! Valid types: sequential, batch'
raise ValueError(e)
# Init distance type
if distance.startswith('euclid'):
self._dist = self._euclid_dist
elif distance.startswith('per'):
self._dist = self._periodic_dist
else:
e = 'Invalid distance type! Valid types: euclidean, periodic'
raise ValueError(e)
# Init weights
if init.startswith('r'):
self.weights = np.random.rand(nx, ny, ndims)
elif not init.startswith('p'):
e = 'Invalid initialization type! Valid types: random, pca'
raise ValueError(e)
# Init grid
self._X, self._Y = np.meshgrid(np.arange(ny), np.arange(nx))
if grid.startswith('r'):
self._locX = self._X
self._locY = self._Y
elif grid.startswith('h'):
self._locX = np.asarray([
x + 0.5 if i % 2 == 0 else x
for i, x in enumerate(self._X.astype(float))
])
self._locY = self._Y * 0.33333
else:
e = 'Invalid grid type! Valid types: rect, hex'
raise ValueError(e)
# Init neighbourhood function
if neighbour.startswith('gauss'):
self._nb = self._nb_gaussian
elif neighbour.startswith('bub'):
self._nb = self._nb_bubble
elif neighbour.startswith('epa'):
self._nb = self._nb_epanechnikov
else:
e = ('Invalid neighbourhood function!' +
'Valid types: gaussian, bubble, epanechnikov')
raise ValueError(e)
# Init learning-rate function
if learning.startswith('exp'):
self._lr = self._lr_exp
elif learning.startswith('pow'):
self._final_lr = self._init_learning_rate * np.exp(-1)
self._lr = self._lr_pow
elif learning.startswith('lin'):
self._lr = self._lr_lin
else:
e = ('Invalid learning rate function!' +
'Valid types: exp, power, linear')
raise ValueError(e)
# Create empty index grid
self.index = np.zeros(self._shape, dtype=np.int32)
# Output grid for easier plotting
self.grid = np.asarray(list(zip(self._locX.flatten(),
self._locY.flatten())))
def _init_weights(self, X: np.ndarray) -> None:
"""Initialize weights from PCA eigenvectors"""
if not hasattr(self, 'weights'):
pca = PCA(n_components=self._ndims)
comp = pca.fit(X).components_[:2]
coeff = X.mean(0) + 5 * X.std(0) / self._shape[0]
# Create grid based on PCA eigenvectors and std dev of features
raw_weights = np.asarray([
(coeff * (comp[0] * (x - 0.5 / self._shape[0]) +
comp[1] * (y - 0.5 / self._shape[1])))
for x, y in zip(np.nditer(self._X.flatten()),
np.nditer(self._Y.flatten()))
]).reshape(self._shape + (self._ndims,))
# Scale to (0, 1)
full_shape = self._shape + (1,)
self.weights = (
(raw_weights - raw_weights.min(2).reshape(full_shape)) /
raw_weights.ptp(2).reshape(full_shape)
)
@staticmethod
def _nb_gaussian(dist: np.ndarray, sigma: float) -> np.ndarray:
return np.exp(-dist ** 2 / (2 * sigma ** 2))
@staticmethod
def _nb_bubble(dist: np.ndarray, sigma: float) -> np.ndarray:
return dist
@staticmethod
def _nb_epanechnikov(dist: np.ndarray, sigma: float) -> np.ndarray:
return np.maximum(np.zeros_like(dist), 1 - dist ** 2)
def _lr_exp(self, t: int) -> float:
return self._init_learning_rate * np.exp(-t / self._iterations)
def _lr_pow(self, t: int) -> float:
return (self._init_learning_rate *
(self._final_lr / self._init_learning_rate) **
(t / self._iterations))
def _lr_lin(self, t: int) -> float:
return (self._init_learning_rate -
(self._init_learning_rate * t * (np.exp(1) - 1) /
(self._iterations * np.exp(1))))
def _euclid_dist(
self,
xmat: np.ndarray,
index: Tuple[int, int]=(),
axis: int=2
) -> np.ndarray:
return np.sqrt(((xmat - self.weights[index]) ** 2).sum(axis=axis))
def _periodic_dist(
self,
xmat: np.ndarray,
index: Tuple[int, int]=(),
axis: int=2
) -> np.ndarray:
pi2 = np.pi * 2
dx = (xmat - self.weights[index]) / pi2
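# Subtracting the rounded quotient wraps each difference to the nearest
# period, i.e. into [-pi, pi], before the Euclidean sum.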
return np.sqrt((((dx - np.round(dx)) * pi2) ** 2).sum(axis=axis))
def _train(self, X: np.ndarray) -> None:
for t in range(self._iterations):
# Update learning rate, reduce radius
lr = self._lr(t)
neigh_radius = self._map_radius * np.exp(-t / self._dlambda)
# Choose random feature vector
f = X[np.random.choice(len(X))]
# Calc euclidean distance
xmat = np.broadcast_to(f, self._shape + (self._ndims,))
index = self._dist(xmat).argmin()
bmu = np.unravel_index(index, self._shape)
# Create distance matrix
distmat = (
(self._locX - self._locX[bmu]) ** 2 +
(self._locY - self._locY[bmu]) ** 2
).reshape(self._shape + (1,))
# Mask out unaffected nodes
mask = (distmat < neigh_radius).astype(int)
theta = self._nb(distmat * mask, neigh_radius)
self.weights += mask * theta * lr * (f - self.weights)
def _batch_train(self, X: np.ndarray) -> None:
for t in range(self._iterations):
# Update learning rate, reduce radius
lr = self._lr(t)
neigh_radius = self._map_radius * np.exp(-t / self._dlambda)
for f in X:
# Calc euclidean distance
xmat = np.broadcast_to(f, self._shape + (self._ndims,))
index = self._dist(xmat).argmin()
bmu = np.unravel_index(index, self._shape)
# Create distance matrix
distmat = (
(self._locX - self._locX[bmu]) ** 2 +
(self._locY - self._locY[bmu]) ** 2
).reshape(self._shape + (1,))
# Mask out unaffected nodes
mask = (distmat < neigh_radius).astype(int)
theta = self._nb(distmat * mask, neigh_radius)
self.weights += mask * theta * lr * (f - self.weights)
def fit(self, X: np.ndarray) -> None:
"""
Run the SOM.
Parameters
----------
X : input data as array of vectors.
"""
self._init_weights(X)
if self._type == 's':
self._train(X)
else:
self._batch_train(X)
self._trained = True
def create_index(self, X: np.ndarray) -> None:
"""
Create an index grid, allowing the coloring of the map with arbitrary
feature data. For instance, one could train the SOM on a subset of the
data, and then create an index using the full dataset. The transform()
method will only need to check the created index grid featuring the
best matching datapoint index per node.
Parameters
----------
X : input data as used to train the SOM, can be significantly larger.
"""
if not self._trained:
raise ValueError('You need to train the SOM first!')
# For each node we calculate the distance to each datapoint
for index in np.ndindex(self._shape):
self.index[index] = self._dist(X, index=index, axis=1).argmin()
def transform(self, X: np.ndarray) -> np.ndarray:
"""
        Transform a dataset based on the index grid created by create_index().
This method will return a subset of the dataset in the shape of
the node matrix.
Parameters
----------
X : input data
Returns
-------
grid : subset of the input data assigned to the best nodes
"""
if not self._trained:
raise ValueError('You need to train the SOM first!')
if not hasattr(self, 'index'):
raise ValueError('You need to index the SOM first!')
grid = np.zeros(self._shape)
for index in np.ndindex(self.index.shape):
grid[index] = X[self.index[index]]
return grid
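# ---------------------------------------------------------------------------
# Minimal usage sketch for the map above. The class name `SOM` and the
# `shape` constructor argument are assumptions; only fit(), create_index()
# and transform() are taken from the code itself.
# ---------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(500, 4)                 # 500 samples with 4 features each
    som = SOM(shape=(10, 10))            # hypothetical 10x10 node grid
    som.fit(X)                           # train the weight vectors on X
    som.create_index(X)                  # best-matching sample index per node
    heat = som.transform(X[:, 0])        # colour each node by feature 0
    print(heat.shape)                    # -> (10, 10), same shape as the map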
|
mit
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/skimage/viewer/canvastools/recttool.py
|
43
|
8886
|
from matplotlib.widgets import RectangleSelector
from ...viewer.canvastools.base import CanvasToolBase
from ...viewer.canvastools.base import ToolHandles
__all__ = ['RectangleTool']
class RectangleTool(CanvasToolBase, RectangleSelector):
"""Widget for selecting a rectangular region in a plot.
After making the desired selection, press "Enter" to accept the selection
and call the `on_enter` callback function.
Parameters
----------
manager : Viewer or PlotPlugin.
Skimage viewer or plot plugin object.
on_move : function
Function called whenever a control handle is moved.
This function must accept the rectangle extents as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
rect_props : dict
Properties for :class:`matplotlib.patches.Rectangle`. This class
redefines defaults in :class:`matplotlib.widgets.RectangleSelector`.
Attributes
----------
extents : tuple
Rectangle extents: (xmin, xmax, ymin, ymax).
Examples
----------
>>> from skimage import data
>>> from skimage.viewer import ImageViewer
>>> from skimage.viewer.canvastools import RectangleTool
>>> from skimage.draw import line
>>> from skimage.draw import set_color
>>> viewer = ImageViewer(data.coffee()) # doctest: +SKIP
>>> def print_the_rect(extents):
... global viewer
... im = viewer.image
... coord = np.int64(extents)
... [rr1, cc1] = line(coord[2],coord[0],coord[2],coord[1])
... [rr2, cc2] = line(coord[2],coord[1],coord[3],coord[1])
... [rr3, cc3] = line(coord[3],coord[1],coord[3],coord[0])
... [rr4, cc4] = line(coord[3],coord[0],coord[2],coord[0])
... set_color(im, (rr1, cc1), [255, 255, 0])
... set_color(im, (rr2, cc2), [0, 255, 255])
... set_color(im, (rr3, cc3), [255, 0, 255])
... set_color(im, (rr4, cc4), [0, 0, 0])
... viewer.image=im
>>> rect_tool = RectangleTool(viewer, on_enter=print_the_rect) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
def __init__(self, manager, on_move=None, on_release=None, on_enter=None,
maxdist=10, rect_props=None):
self._rect = None
props = dict(edgecolor=None, facecolor='r', alpha=0.15)
props.update(rect_props if rect_props is not None else {})
if props['edgecolor'] is None:
props['edgecolor'] = props['facecolor']
RectangleSelector.__init__(self, manager.ax, lambda *args: None,
rectprops=props)
CanvasToolBase.__init__(self, manager, on_move=on_move,
on_enter=on_enter, on_release=on_release)
# Events are handled by the viewer
try:
self.disconnect_events()
except AttributeError:
# disconnect the events manually (hack for older mpl versions)
[self.canvas.mpl_disconnect(i) for i in range(10)]
# Alias rectangle attribute, which is initialized in RectangleSelector.
self._rect = self.to_draw
self._rect.set_animated(True)
self.maxdist = maxdist
self.active_handle = None
self._extents_on_press = None
if on_enter is None:
def on_enter(extents):
print("(xmin=%.3g, xmax=%.3g, ymin=%.3g, ymax=%.3g)" % extents)
self.callback_on_enter = on_enter
props = dict(mec=props['edgecolor'])
self._corner_order = ['NW', 'NE', 'SE', 'SW']
xc, yc = self.corners
self._corner_handles = ToolHandles(self.ax, xc, yc, marker_props=props)
self._edge_order = ['W', 'N', 'E', 'S']
xe, ye = self.edge_centers
self._edge_handles = ToolHandles(self.ax, xe, ye, marker='s',
marker_props=props)
self.artists = [self._rect,
self._corner_handles.artist,
self._edge_handles.artist]
self.manager.add_tool(self)
@property
def _rect_bbox(self):
if not self._rect:
return 0, 0, 0, 0
x0 = self._rect.get_x()
y0 = self._rect.get_y()
width = self._rect.get_width()
height = self._rect.get_height()
return x0, y0, width, height
@property
def corners(self):
"""Corners of rectangle from lower left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
xc = x0, x0 + width, x0 + width, x0
yc = y0, y0, y0 + height, y0 + height
return xc, yc
@property
def edge_centers(self):
"""Midpoint of rectangle edges from left, moving clockwise."""
x0, y0, width, height = self._rect_bbox
w = width / 2.
h = height / 2.
xe = x0, x0 + w, x0 + width, x0 + w
ye = y0 + h, y0, y0 + h, y0 + height
return xe, ye
@property
def extents(self):
"""Return (xmin, xmax, ymin, ymax)."""
x0, y0, width, height = self._rect_bbox
xmin, xmax = sorted([x0, x0 + width])
ymin, ymax = sorted([y0, y0 + height])
return xmin, xmax, ymin, ymax
@extents.setter
def extents(self, extents):
x1, x2, y1, y2 = extents
xmin, xmax = sorted([x1, x2])
ymin, ymax = sorted([y1, y2])
# Update displayed rectangle
self._rect.set_x(xmin)
self._rect.set_y(ymin)
self._rect.set_width(xmax - xmin)
self._rect.set_height(ymax - ymin)
# Update displayed handles
self._corner_handles.set_data(*self.corners)
self._edge_handles.set_data(*self.edge_centers)
self.set_visible(True)
self.redraw()
def on_mouse_release(self, event):
if event.button != 1:
return
if not self.ax.in_axes(event):
self.eventpress = None
return
RectangleSelector.release(self, event)
self._extents_on_press = None
# Undo hiding of rectangle and redraw.
self.set_visible(True)
self.redraw()
self.callback_on_release(self.geometry)
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self._set_active_handle(event)
if self.active_handle is None:
# Clear previous rectangle before drawing new rectangle.
self.set_visible(False)
self.redraw()
self.set_visible(True)
RectangleSelector.press(self, event)
def _set_active_handle(self, event):
"""Set active handle based on the location of the mouse event"""
# Note: event.xdata/ydata in data coordinates, event.x/y in pixels
c_idx, c_dist = self._corner_handles.closest(event.x, event.y)
e_idx, e_dist = self._edge_handles.closest(event.x, event.y)
# Set active handle as closest handle, if mouse click is close enough.
if c_dist > self.maxdist and e_dist > self.maxdist:
self.active_handle = None
return
elif c_dist < e_dist:
self.active_handle = self._corner_order[c_idx]
else:
self.active_handle = self._edge_order[e_idx]
# Save coordinates of rectangle at the start of handle movement.
x1, x2, y1, y2 = self.extents
# Switch variables so that only x2 and/or y2 are updated on move.
if self.active_handle in ['W', 'SW', 'NW']:
x1, x2 = x2, event.xdata
if self.active_handle in ['N', 'NW', 'NE']:
y1, y2 = y2, event.ydata
self._extents_on_press = x1, x2, y1, y2
def on_move(self, event):
if self.eventpress is None or not self.ax.in_axes(event):
return
if self.active_handle is None:
# New rectangle
x1 = self.eventpress.xdata
y1 = self.eventpress.ydata
x2, y2 = event.xdata, event.ydata
else:
x1, x2, y1, y2 = self._extents_on_press
if self.active_handle in ['E', 'W'] + self._corner_order:
x2 = event.xdata
if self.active_handle in ['N', 'S'] + self._corner_order:
y2 = event.ydata
self.extents = (x1, x2, y1, y2)
self.callback_on_move(self.geometry)
@property
def geometry(self):
return self.extents
if __name__ == '__main__': # pragma: no cover
from ...viewer import ImageViewer
from ... import data
viewer = ImageViewer(data.camera())
rect_tool = RectangleTool(viewer)
viewer.show()
print("Final selection:")
rect_tool.callback_on_enter(rect_tool.extents)
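# Illustrative note: besides interactive dragging, the selection can also be
# set programmatically through the `extents` property defined above, e.g.
# (inside the demo block):
#
#     rect_tool.extents = (10, 100, 20, 120)   # (xmin, xmax, ymin, ymax)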
|
gpl-3.0
|
yanlend/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
53
|
49781
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
# Test handling of explicit additional (not in input) labels to PRF
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
        # No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
# Test a subset of labels may be requested for PRF
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
    # The following situation corresponds to a perfect
    # test statistic; the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, 1 - y2), 1)
assert_equal(hamming_loss(y1, 1 - y1), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[+1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, +0.24],
[-2.36, -0.79, -0.27, +0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[+0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, +0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise an error if the number of classes does not match.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
|
bsd-3-clause
|
rahuldhote/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
lensacom/sparkit-learn
|
splearn/linear_model/logistic.py
|
2
|
6215
|
# encoding: utf-8
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model import LogisticRegression
from ..utils.validation import check_rdd
from .base import SparkLinearModelMixin
class SparkLogisticRegression(LogisticRegression, SparkLinearModelMixin):
"""Distributed implementation of scikit-learn's Logistic classifier.
Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
# TODO: REVISIT!
# workaround to keep the classes parameter unchanged
@property
def classes_(self):
return self._classes_
@classes_.setter
def classes_(self, value):
pass
def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
# possible improvement: use partial_fit within partitions and then average
# in the final reduce
self._classes_ = np.unique(classes)
return self._spark_fit(SparkLogisticRegression, Z)
def predict(self, X):
"""Distributed method to predict class labels for samples in X.
Parameters
----------
X : ArrayRDD containing {array-like, sparse matrix}
Samples.
Returns
-------
C : ArrayRDD
Predicted class label per sample.
"""
check_rdd(X, (sp.spmatrix, np.ndarray))
return self._spark_predict(SparkLogisticRegression, X)
def to_scikit(self):
m = self._to_scikit(LogisticRegression)
m.classes_ = self._classes_
return m
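# --- Illustrative usage sketch (not part of the original module) -----------
# SparkLogisticRegression mirrors scikit-learn's LogisticRegression, but
# fit() expects a DictRDD of (X, y) blocks plus the label set up front (see
# fit() above). The DictRDD construction and column selection below are
# assumptions about the splearn API and only meant as a sketch; `sc` is an
# existing SparkContext.
#
#   import numpy as np
#   from splearn.rdd import DictRDD                    # assumed import path
#   X = np.random.rand(1000, 20)
#   y = np.random.randint(0, 2, 1000)
#   Z = DictRDD((sc.parallelize(X, 4), sc.parallelize(y, 4)),
#               columns=('X', 'y'))                    # assumed constructor
#   clf = SparkLogisticRegression()
#   clf.fit(Z, classes=np.unique(y))                   # signature from fit() above
#   y_pred = clf.predict(Z[:, 'X'])                    # column selection assumed
#   local = clf.to_scikit()                            # plain sklearn estimator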
|
apache-2.0
|
chrissly31415/amimanera
|
utils/nlp_features.py
|
1
|
24112
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" NLP features
"""
from qsprLib import *
from nltk.stem.porter import PorterStemmer
from bs4 import BeautifulSoup
import re
import difflib
from scipy.spatial.distance import cdist
from sklearn.metrics.pairwise import pairwise_distances
from collections import Counter
import itertools
import math
import gensim
from nltk.corpus import wordnet as wn
from nltk import bigrams
from nltk.corpus import brown
from nltk import Text
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction import text
from nltk import corpus
import pickle
#TODO: right after tfidf, input 2 sparse matrices: def computeSimilarityFeatures(Xs_all,Xs_all_new)
#check http://research.microsoft.com/en-us/projects/mslr/feature.aspx
#and for dense def computeSimilarityFeatures(Xall,Xall_new,nsplit)
#2nd place solution home depot: https://www.kaggle.com/c/home-depot-product-search-relevance/forums/t/20427/congrats-to-the-winners
#http://stackoverflow.com/questions/16597265/appending-to-an-empty-data-frame-in-pandas
stemmer = PorterStemmer() # faster
stop_words = text.ENGLISH_STOP_WORDS.union(corpus.stopwords.words('english'))
def str_stemmer(s):
liste = [stemmer.stem(word) for word in s.lower().split()]
liste = " ".join(liste)
return(liste)
def str_common_word(str1, str2):
return sum(int(str2.find(word)>=0) for word in str1.split())
def word_match_share(row):
q1words = {}
q2words = {}
for word in str(row[0]).lower().split():
if word not in stop_words:
q1words[word] = 1.0
for word in str(row[1]).lower().split():
if word not in stop_words:
q2words[word] = 1.0
if len(q1words) == 0 or len(q2words) == 0:
# The computer-generated chaff includes a few questions that are nothing but stopwords
return 0
shared_words_in_q1 = [w for w in list(q1words.keys()) if w in q2words]
shared_words_in_q2 = [w for w in list(q2words.keys()) if w in q1words]
R = (len(shared_words_in_q1) + len(shared_words_in_q2))/float(len(q1words) + len(q2words))
return R
# If a word appears only once, we ignore it completely (likely a typo)
# Epsilon defines a smoothing constant, which makes the effect of extremely rare words smaller
def get_weight(count, eps=10000.0, min_count=2):
if count < min_count:
return 0.0
else:
return 1.0 / float(count + eps)
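# Minimal sanity check of the smoothing above (added for illustration; runs on
# import at negligible cost): hapaxes are dropped entirely and rarer words get
# larger weights than very common ones.
assert get_weight(1) == 0.0                     # count < min_count -> ignored
assert get_weight(2) > get_weight(5000) > 0.0   # rarer word -> larger weight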
def tfidf_word_match_share(row):
q1words = {}
q2words = {}
for word in str(row[0]).lower().split():
if word not in stop_words:
q1words[word] = 1
for word in str(row[1]).lower().split():
if word not in stop_words:
q2words[word] = 1
if len(q1words) == 0 or len(q2words) == 0:
# The computer-generated chaff includes a few questions that are nothing but stopwords
return 0
shared_weights = [weights.get(w, 0) for w in list(q1words.keys()) if w in q2words] + [weights.get(w, 0) for w in
list(q2words.keys()) if w in q1words]
total_weights = [weights.get(w, 0) for w in q1words] + [weights.get(w, 0) for w in q2words]
R = np.sum(shared_weights) / float(np.sum(total_weights))
return R
weights=0
def get_tfidf_share(X):
global weights
eps = 5000.0
train_qs = pd.Series(X['question1'].tolist() + X['question2'].tolist()).astype(str)
words = (" ".join(train_qs)).lower().split()
counts = Counter(words)
weights = {word: get_weight(count) for word, count in list(counts.items())}
return X.apply(tfidf_word_match_share, axis=1, raw=True)
def information_entropy(text):
log2=lambda x:math.log(x)/math.log(2)
exr={}
infoc=0
for each in text:
try:
exr[each]+=1
except:
exr[each]=1
textlen=len(text)
for k,v in list(exr.items()):
freq = 1.0*v/textlen
infoc+=freq*log2(freq)
infoc*=-1
return infoc
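# Hand-checked examples for the entropy helper above (illustration only): a
# constant string carries no information, a uniform two-symbol string carries
# exactly one bit.
assert abs(information_entropy("aaaa")) < 1e-12
assert abs(information_entropy("ab") - 1.0) < 1e-12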
def computeSimilarityFeatures(Xall,columns=['question1','question2'],verbose=False,useOnlyTrain=False,startidx=0,stop_words=None,doSVD=261,vectorizer=None,reducer=None):
print("Compute scipy similarity...for :",columns)
if vectorizer is None:
vectorizer = TfidfVectorizer(min_df=3, max_features=None, strip_accents='unicode', analyzer='word',ngram_range=(1, 5), use_idf=True,smooth_idf=True,sublinear_tf=True,stop_words = stop_words,token_pattern=r'\w{1,}')
print(vectorizer)
if useOnlyTrain:
print("Using train only for TFIDF...")
Xtrain = Xall[startidx:]
Xs1 = vectorizer.fit(Xtrain[columns[0]])
else:
Xs1 = vectorizer.fit(Xall[columns[0]])
Xs1 = vectorizer.transform(Xall[columns[0]])
Xs2 = vectorizer.transform(Xall[columns[1]])
sparse=True
if doSVD is not None:
print("Similiarity with SVD, n_components:",doSVD)
if reducer is None:
reducer=TruncatedSVD(n_components=doSVD, algorithm='randomized', n_iter=5)
print(reducer)
Xs1 = reducer.fit_transform(Xs1)
#print "Variance explaind:",np.sum(reducer.explained_variance_ratio_)
Xs2 = reducer.transform(Xs2)
sparse=False
sim = computeScipySimilarity(Xs1,Xs2,sparse=sparse)
return sim
def computeScipySimilarity(Xs1,Xs2,sparse=False):
Xall_new = np.zeros((Xs1.shape[0],4))
if sparse:
print(Xs1.shape)
print(Xs2.shape)
Xs1 = np.asarray(Xs1.todense())
Xs2 = np.asarray(Xs2.todense())
#Xall_new[:,0] = pairwise_distances(Xs1,Xs2,metric='cosine')
#Xall_new[:,1] = pairwise_distances(Xs1,Xs2,metric='cityblock')
#Xall_new[:,2] = pairwise_distances(Xs1,Xs2,metric='hamming')
#Xall_new[:,3] = pairwise_distances(Xs1,Xs2,metric='euclidean')
#Xall_new = pd.DataFrame(Xall_new,columns=['cosine','cityblock','hamming','euclidean'])
#print Xall_new.head(30)
for i,(a,b) in enumerate(zip(Xs1,Xs2)):
a = a.reshape(-1,a.shape[0])
b = b.reshape(-1,b.shape[0])
#print a.shape
#print type(a)
dist = cdist(a,b,'cosine')
Xall_new[i,0] = dist
#Xall_new[i,3] = dist
dist = cdist(a,b,'cityblock')
Xall_new[i,1] = dist
dist = cdist(a,b,'hamming')
Xall_new[i,2] = dist
dist = cdist(a,b,'euclidean')
Xall_new[i,3] = dist
#dist = cdist(a,b,'correlation')
#Xall_new[i,5] = dist
#dist = cdist(a,b,'jaccard')
#Xall_new[i,4] = dist
Xall_new = pd.DataFrame(Xall_new,columns=['cosine','cityblock','hamming','euclidean'])
print("NA,before:",Xall_new.isnull().values.sum())
Xall_new = Xall_new.fillna(0.0)
print("NA,after:",Xall_new.isnull().values.sum())
print(Xall_new.corr(method='spearman'))
return Xall_new
def getSynonyms(word,stemmer):
#synonyms=[word]
try:
synonyms = [l.lemma_names() for l in wn.synsets(word)]
except:
pass
synonyms.append([word])
synonyms = list(itertools.chain(*synonyms))
synonyms = [stemmer.stem(l.lower()) for l in synonyms]
synonyms = set(synonyms)
return(synonyms)
def makeQuerySynonyms(Xall,construct_map=False):
query_map={}
if construct_map:
print("Creating synonyma for query...")
model = gensim.models.Word2Vec.load_word2vec_format('/home/loschen/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
X_temp = Xall.drop_duplicates('query')
print(X_temp.describe())
for i in range(X_temp.shape[0]):
query = X_temp["query"].iloc[i].lower()
qsynonyms = query.split()
for word in query.split():
#print "word:",word
try:
s = model.most_similar(word, topn=3)
qlist = []
for item,sim in s:
if sim>0.6:
qlist.append(item.lower())
#print "word: %s synonyma: %r"%(word,qlist)
qsynonyms = qsynonyms+qlist
except:
pass
#print qsynonyms
qsynonyms = (" ").join(z.replace("_"," ") for z in qsynonyms)
#print qsynonyms
#Xall["query"].iloc[i]=qsynonyms
query_map[query]=qsynonyms
#raw_input()
if i%20==0:
print("i:",i)
with open("w2v_querymap.pkl", "w") as f: pickle.dump(query_map, f)
with open("w2v_querymap.pkl", "r") as f: query_map = pickle.load(f)
print("Mapping synonyma to query...")
for i in range(Xall.shape[0]):
query = Xall["query"].iloc[i].lower()
Xall["query"].iloc[i]=query_map[query]
if i%5000==0:
print("i:",i)
print(Xall['query'].iloc[:10])
return Xall
def additionalFeatures(Xall,verbose=False,dropList=['bestmatch']):
print("Computing additional features...")
stemmer = PorterStemmer()
Xall_new = np.zeros((Xall.shape[0],13))
for i in range(Xall.shape[0]):
query = Xall["search_term"].iloc[i].lower()
title = Xall["product_title"].iloc[i].lower()
desc = Xall["product_description"].iloc[i].lower()
#here we should get similars...
similar_words = [getSynonyms(q,stemmer) for q in query.split()]
similar_words = set(itertools.chain(*similar_words))
#is it necessary???
query=re.sub("[^a-zA-Z0-9]"," ", query)
query= (" ").join([stemmer.stem(z) for z in query.split()])
title=re.sub("[^a-zA-Z0-9]"," ", title)
title= (" ").join([stemmer.stem(z) for z in title.split()])
desc=re.sub("[^a-zA-Z0-9]"," ", desc)
desc= (" ").join([stemmer.stem(z) for z in desc.split()])
#start here
nquery = len(query.split())
ntitle = len(title.split())
ndesc = len(desc.split())
Xall_new[i,0] = nquery
Xall_new[i,1] = ntitle
Xall_new[i,2] = nquery / float(ntitle)
Xall_new[i,3] = ndesc+1
Xall_new[i,4] = nquery / float(ndesc+1)
s = difflib.SequenceMatcher(None,a=query,b=title).ratio()
Xall_new[i,5] = s
nmatches = 0
avgsim = 0.0
lastsim = 0.0
firstsim = 0.0
checksynonyma = 0.0
#find number of matches, split in first and last word of search term
for qword in query.split():
# in case we have a perfect match
if qword in title:
nmatches+=1
avgsim = avgsim + 1.0
if qword == query.split()[-1]:
lastsim+=1
if qword == query.split()[0]:
firstsim+=1
# otherwise get string similarity
else:
bestvalue=0.0
for tword in title.split():
s = difflib.SequenceMatcher(None,a=qword,b=tword).ratio()
if s>bestvalue:
bestvalue=s
avgsim = avgsim + bestvalue
if qword == query.split()[-1]:
lastsim = lastsim + bestvalue
if qword == query.split()[0]:
firstsim = firstsim + bestvalue
#check similar
#print "qword:",qword
#if similar_words is not None:
for simword in similar_words:
if simword in title:
checksynonyma+=1
Xall_new[i,6] = nmatches / float(nquery)
Xall_new[i,7] = avgsim / float(nquery)
Xall_new[i,8] = information_entropy(query)
Xall_new[i,9] = information_entropy(title)
Xall_new[i,10] = lastsim
Xall_new[i,11] = firstsim
Xall_new[i,12] = checksynonyma / float(nquery)
if i%5000==0:
print("i:",i)
if verbose:
print(query)
print(nquery)
print(title)
print(ntitle)
print("ratio:",Xall_new[i,2])
print("difflib ratio:",s)
print("matches:",nmatches)
input()
Xall_new = pd.DataFrame(Xall_new,columns=['query_length','title_length','query_title_ratio','desc_length','query_desc_ratio','difflibratio','bestmatch','averagematch','S_query','S_title','last_sim','first_sim','checksynonyma',])
Xall_new = Xall_new.drop(dropList, axis=1)
print(Xall_new.corr(method='spearman'))
return Xall_new
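# Illustrative call (the frame is hypothetical): additionalFeatures expects the
# 'search_term', 'product_title' and 'product_description' columns used above
# and returns one row of count/similarity/entropy features per input row.
#   feats = additionalFeatures(df_all, dropList=[])
#   df_all = pd.concat([df_all, feats], axis=1)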
def additionalFeatures_new(Xall,verbose=False,dropList=['bestmatch']):
print("Computing additional features new...")
stemmer = PorterStemmer()
Xall_new = np.zeros((Xall.shape[0],11))
for i in range(Xall.shape[0]):
query = Xall["search_term"].iloc[i].lower()
title = Xall["product_info"].iloc[i].lower()
#here we should get similars...
similar_words = [getSynonyms(q,stemmer) for q in query.split()]
similar_words = set(itertools.chain(*similar_words))
#is it necessary???
query=re.sub("[^a-zA-Z0-9]"," ", query)
query= (" ").join([stemmer.stem(z) for z in query.split()])
title=re.sub("[^a-zA-Z0-9]"," ", title)
title= (" ").join([stemmer.stem(z) for z in title.split()])
#start here
nquery = len(query.split())
ntitle = len(title.split())
Xall_new[i,0] = nquery
Xall_new[i,1] = ntitle
Xall_new[i,2] = nquery / float(ntitle)
s = difflib.SequenceMatcher(None,a=query,b=title).ratio()
Xall_new[i,3] = s
nmatches = 0
avgsim = 0.0
lastsim = 0.0
firstsim = 0.0
checksynonyma = 0.0
#find number of matches, split in first and last word of search term
for qword in query.split():
# in case we have a perfect match
if qword in title:
nmatches+=1
avgsim = avgsim + 1.0
if qword == query.split()[-1]:
lastsim+=1
if qword == query.split()[0]:
firstsim+=1
# otherwise get string similarity
else:
bestvalue=0.0
for tword in title.split():
s = difflib.SequenceMatcher(None,a=qword,b=tword).ratio()
if s>bestvalue:
bestvalue=s
avgsim = avgsim + bestvalue
if qword == query.split()[-1]:
lastsim = lastsim + bestvalue
if qword == query.split()[0]:
firstsim = firstsim + bestvalue
#check similar
#print "qword:",qword
#if similar_words is not None:
for simword in similar_words:
if simword in title:
checksynonyma+=1
Xall_new[i,4] = nmatches / float(nquery)
Xall_new[i,5] = avgsim / float(nquery)
Xall_new[i,6] = information_entropy(query)
Xall_new[i,7] = information_entropy(title)
Xall_new[i,8] = lastsim
Xall_new[i,9] = firstsim
Xall_new[i,10] = checksynonyma / float(nquery)
if i%5000==0:
print("i:",i)
if verbose:
print(query)
print(nquery)
print(title)
print(ntitle)
print("ratio:",Xall_new[i,2])
print("difflib ratio:",s)
print("matches:",nmatches)
input()
Xall_new = pd.DataFrame(Xall_new,columns=['query_length','title_length','query_title_ratio','difflibratio','bestmatch','averagematch','S_query','S_title','last_sim','first_sim','checksynonyma',])
Xall_new = Xall_new.drop(dropList, axis=1)
print(Xall_new.corr(method='spearman'))
return Xall_new
def cleanse_data(Xall):
print("Cleansing the data...")
with open("query_map.pkl", "r") as f: query_map = pickle.load(f)#key query value corrected value
ablist=[]
ablist.append((['ps','p.s.','play station','ps2','ps3','ps4'],'playstation'))
ablist.append((['ny','n.y.'],'new york'))
ablist.append((['tb','tera byte'],'gigabyte'))
ablist.append((['gb','giga byte'],'gigabyte'))
ablist.append((['t.v.','tv'],'television'))
ablist.append((['mb','mega byte'],'megabyte'))
ablist.append((['d.r.','dr'],'doctor'))
ablist.append((['phillips'],'philips'))
for i in range(Xall.shape[0]):
query = Xall["query"].iloc[i].lower()
#correct typos
if query in list(query_map.keys()):
query = query_map[query]
#correct abbreviations query
new_query =[]
for qword in query.split():
for ab,full in ablist:
if qword in ab:
qword = full
new_query.append(qword)
new_query = (" ").join(new_query)
Xall["query"].iloc[i] = new_query
title = Xall["product_title"].iloc[i].lower()
#correct abbreviations title
new_title=[]
for qword in title.split():
for ab,full in ablist:
if qword in ab:
qword = full
new_title.append(qword)
new_title = (" ").join(new_title)
Xall["product_title"].iloc[i] = new_title
if i%5000==0:
print("i:",i)
print("Finished")
return Xall
def genWord2VecFeatures_new(Xall,verbose=False):
model = gensim.models.Word2Vec.load_word2vec_format('/home/loschen/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
for i in range(Xall.shape[0]):
query = Xall["search_term"].iloc[i].lower()
title = Xall["product_title"].iloc[i].lower()
query=re.sub("[^a-zA-Z0-9]"," ", query)
title=re.sub("[^a-zA-Z0-9]"," ", title)
for qword in query.split():
for tword in title.split():
s = model.similarity(qword,tword)
print("query: %s title: %s sim: %4.2f"%(qword,tword,s))
print(model.most_similar(qword, topn=5))
print(model.most_similar(tword, topn=5))
input()
#make top 5 most similar in query and check again...
def genWord2VecFeatures(Xall,verbose=True,dropList=[]):
print("Compute word2vec features...")
#print Xall['query'].tolist()
#b = gensim.models.Word2Vec(brown.sents())
model = gensim.models.Word2Vec.load_word2vec_format('/home/loschen/Downloads/GoogleNews-vectors-negative300.bin.gz', binary=True)
Xall_new = np.zeros((Xall.shape[0],5))
for i in range(Xall.shape[0]):
query = Xall["search_term"].iloc[i].lower()
title = Xall["product_title"].iloc[i].lower()
query=re.sub("[^a-zA-Z0-9]"," ", query)
nquery = len(query.split())
title=re.sub("[^a-zA-Z0-9]"," ", title)
ntitle = len(title.split())
bestsim = 0.0
lastsim = 0.0
firstsim = 0.0
avgsim = 0.0
#print "Query:",query
#print "Title:",title
for qword in query.split():
if qword in title:
bestsim = bestsim + 1.0
avgsim = avgsim +1.0
if qword == query.split()[-1]:
lastsim+=1
if qword == query.split()[0]:
firstsim+=1
else:
bestvalue=0.0
for tword in title.split():
try:
s = model.similarity(qword,tword)
#print "query: %s title: %s sim: %4.2f"%(qword,tword,s)
#print model.most_similar(qword, topn=5)
#print model.most_similar(tword, topn=5)
except:
s = 0.0
avgsim = avgsim + s
if s>bestvalue:
bestvalue=s
bestsim = bestsim + bestvalue
#print "bestvalue: %4.2f avg: %4.2f"%(bestvalue,avgsim)
if qword == query.split()[-1]:
lastsim = bestvalue
if qword == query.split()[0]:
firstsim = bestvalue
if i%5000==0:
print("i:",i)
Xall_new[i,0] = bestsim / float(nquery)
Xall_new[i,1] = lastsim
Xall_new[i,2] = firstsim
Xall_new[i,3] = avgsim / float(ntitle)
Xall_new[i,4] = avgsim
#raw_input()
Xall_new = pd.DataFrame(Xall_new,columns=['w2v_bestsim','w2v_lastsim','w2v_firstsim','w2v_avgsim','w2v_totalsim'])
Xall_new = Xall_new.drop(dropList, axis=1)
print(Xall_new.corr(method='spearman'))
return Xall_new
def useBenchmarkMethod(X,returnList=True,verbose=False):
print("Create benchmark features...")
X = X.fillna("")
stemmer = PorterStemmer()
s_data=[]
for i in range(X.shape[0]):
s=(" ").join(["q"+ z for z in BeautifulSoup(X["query"].iloc[i]).get_text(" ").split(" ")]) + " " + (" ").join(["z"+ z for z in BeautifulSoup(X["product_title"].iloc[i]).get_text(" ").split(" ")]) + " " + BeautifulSoup(X["product_description"].iloc[i]).get_text(" ")
s=re.sub("[^a-zA-Z0-9]"," ", s)
s= (" ").join([stemmer.stem(z) for z in s.split()])
s_data.append(s.lower())
if i%5000==0:
print("i:",i)
if returnList:
X = s_data
X = pd.DataFrame(X,columns=['query'])
else:
X = np.asarray(s_data)
X = X.reshape((X.shape[0],-1))
X = pd.DataFrame(X,columns=['concate_all'])
print("Finished..")
#print X
#print type(X[0])
return X
# Use Pandas to read in the training and test data
#train = pd.read_csv("../input/train.csv").fillna("")
#test = pd.read_csv("../input/test.csv").fillna("")
def build_query_correction_map(train,test,print_different=True):
# get all query
queries = set(train['search_term'].values)
correct_map = {}
if print_different:
print(("%30s \t %30s"%('original query','corrected query')))
for q in queries:
corrected_q = autocorrect_query(q,train=train,test=test,warning_on=False)
if print_different and q != corrected_q:
print(("%30s \t %30s"%(q,corrected_q)))
correct_map[q] = corrected_q
return correct_map
def autocorrect_query(query,train=None,test=None,cutoff=0.8,warning_on=True):
"""
https://www.kaggle.com/hiendang/crowdflower-search-relevance/auto-correct-query
autocorrect a query based on the training set
"""
if train is None:
train = pd.read_csv('./data/train.csv').fillna('')
if test is None:
test = pd.read_csv('./data/test.csv').fillna('')
train_data = train.values[train['search_term'].values==query,:]
test_data = test.values[test['search_term'].values==query,:]
s = ""
for r in train_data:
#print "----->r2:",r[2]
#print "r3:",r[3]
s = "%s %s %s"%(s,BeautifulSoup(r[2]).get_text(" ",strip=True),BeautifulSoup(r[3]).get_text(" ",strip=True))
#print "s:",s
#raw_input()
for r in test_data:
s = "%s %s %s"%(s,BeautifulSoup(r[2]).get_text(" ",strip=True),BeautifulSoup(r[3]).get_text(" ",strip=True))
s = re.findall(r'[\'\"\w]+',s.lower())
#print s
s_bigram = [' '.join(i) for i in bigrams(s)]
#print s_bigram
#raw_input()
s.extend(s_bigram)
corrected_query = []
for q in query.lower().split():
#print "q:",q
if len(q)<=2:
corrected_query.append(q)
continue
corrected_word = difflib.get_close_matches(q, s,n=1,cutoff=cutoff)
#print "correction:",corrected_word
if len(corrected_word) >0:
corrected_query.append(corrected_word[0])
else :
if warning_on:
print(("WARNING: cannot find matched word for '%s' -> used the original word"%(q)))
corrected_query.append(q)
#print "corrected_query:",corrected_query
#raw_input()
return ' '.join(corrected_query)
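# Illustrative call (frames are hypothetical): both frames need a 'search_term'
# column, and autocorrect_query() builds its vocabulary from the title and
# description text found in columns 2 and 3 of .values (see above).
#   corrected = autocorrect_query("angle bracet", train=train_df, test=test_df)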
def autocorrect():
query_map = build_query_correction_map()
with open("query_map.pkl", "w") as f: pickle.dump(query_map, f)
|
lgpl-3.0
|
mjudsp/Tsallis
|
examples/tree/unveil_tree_structure.py
|
67
|
4824
|
"""
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight on the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %ss else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows to retrieve the node indicator functions. A non zero element of
# indicator matrix at the position (i, j) indicates that the sample i goes
# through the node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's make it for the sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
if leave_id[sample_id] == node_id:  # skip the leaf node; its feature/threshold are arbitrary
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
|
bsd-3-clause
|
yuan-energy/Real-ESSI-Verification-Examples
|
model_fei_files/shearbeam_pisano_plastic/result/post_cyclic.py
|
2
|
3650
|
#!/usr/bin/python
import scipy as sp
import matplotlib.pyplot as plt
import h5py
# import colorplot
import sys
import pylab as pl
# print sys.path
# exit(0)
# unload_prod = sp.loadtxt("unload.prod",dtype=sp.double)
# branches = sp.loadtxt("branches.txt",dtype=sp.int32)
# alphas = sp.loadtxt("alphas.txt",dtype=sp.double)
n1 = 101
n2 = 2000
# plotthese = unload_prod < 0
file_in=sys.argv[1]
colors = [ [1,0,0], [0,1,0], [0,0,1]]
markersizes = [5., 5., 20.]
f = h5py.File(file_in.format(0))
# f = h5py.File("vonmisesLT_test01.h5.feioutput".format(0))
t = f["time"][:]
exx = f["/Model/Elements/Outputs"][0,:]
eyy = f["/Model/Elements/Outputs"][1,:]
ezz = f["/Model/Elements/Outputs"][2,:]
exy = f["/Model/Elements/Outputs"][3,:]
exz = f["/Model/Elements/Outputs"][4,:]
eyz = f["/Model/Elements/Outputs"][5,:]
epxx = f["/Model/Elements/Outputs"][6,:]
epyy = f["/Model/Elements/Outputs"][7,:]
epzz = f["/Model/Elements/Outputs"][8,:]
epxy = f["/Model/Elements/Outputs"][9,:]
epxz = f["/Model/Elements/Outputs"][10,:]
epyz = f["/Model/Elements/Outputs"][11,:]
sxx = f["/Model/Elements/Outputs"][12,:]
syy = f["/Model/Elements/Outputs"][13,:]
szz = f["/Model/Elements/Outputs"][14,:]
sxy = f["/Model/Elements/Outputs"][15,:]
sxz = f["/Model/Elements/Outputs"][16,:]
syz = f["/Model/Elements/Outputs"][17,:]
print "t.shape = ", t.shape
print "sxx.shape = ", sxx.shape
gamma = 2*exz
tau = sxz/1e3
evol = -(exx + eyy + ezz)
p = -(sxx + syy + szz)/3
nt = len(t)
ns = len(sxx)
n = min((nt,ns))
#
# Gamma - tau
#
# markers = plotthese[(i*(n1+n2) + n1):(i*(n1+n2) + n1+n2)]
fig=pl.figure(1)
plot=fig.add_subplot(111)
plot.plot(gamma[:-1], tau[:-1])
plot.tick_params(axis='both', which='major', labelsize=20)
plot.tick_params(axis='both', which='minor', labelsize=15)
pl.xlim([-0.003,0.003])
# if (i>0):
# plt.plot(gamma[markers], tau[markers],"ok")
plt.xlabel("Shear strain, $\\gamma [\\perthousand]$",fontsize=24)
plt.ylabel("Shear stress, $\\tau [kPa]$",fontsize=24)
plt.grid("on")
# if (i>0):
# plt.figure(2)
# plt.plot(gamma[:-1], tau[:-1],".-k")
# jj = 0
# for label in [120, 200, 300]:
# plotthese = branches==label
# markers = plotthese[(i*(n1+n2) + n1):(i*(n1+n2) + n1+n2)]
# plt.plot(gamma[markers], tau[markers],"o",label=str(label), color=colors[jj], markersize=markersizes[jj],alpha=0.5)
# jj+=1
# plt.legend()
# plt.xlabel("Shear strain, $\\gamma [\\perthousand]$")
# plt.ylabel("Shear stress, $\\tau [kPa]\$")
# plt.grid("on")
# plt.figure(2)
# plt.plot(t[:n], tau[:n])
# plt.xlabel("Time")
# plt.ylabel("Shear stress, $\\tau [kPa]\$")
# plt.grid("on")
# plt.figure(3)
# plt.plot(t[:n], gamma[:n])
# plt.xlabel("Time")
# plt.ylabel("Shear strain, $\\gamma [\\perthousand]$")
# plt.grid("on")
# #
# p - t
#
# plt.figure(2)
# plt.plot(t, p[:-1]/1000)
# plt.grid()
# plt.xlabel(r"Time, $t [s]$")
# plt.ylabel(r"Mean pressure, $p [kPa]$")
# #
# # evol - t
# #
# plt.figure(3)
# plt.plot(t, evol[:-1]*1000)
# plt.grid()
# plt.xlabel(r"Time, $t [s]$")
# plt.ylabel(r"Volumetric strain, $\epsilon_{\mathrm{vol}} [\perthousand]$")
# for i in range(1):
# plt.figure(i+1)
# ax = plt.gca()
# lines = ax.get_lines()
# lines[1].set_c("r")
# lines[1].set_linestyle("-")
# lines[1].set_marker(".")
# print alphas.shape
# print alphas
# plt.figure(3)
# plt.subplot(2,1,1)
# plt.plot(alphas[:,0],label="alpha")
# plt.plot(alphas[:,1],label="alpha0")
# plt.plot(alphas[:,2],label="alpha0mem")
# plt.subplot(2,1,2)
# plt.plot(alphas[:,3],label="nij_dev")
# plt.legend()
plt.show()
file_in=file_in[:-13]
fileout=file_in+'.png'
fig.savefig(fileout,bbox_inches='tight')
|
cc0-1.0
|
hugobowne/scikit-learn
|
examples/model_selection/plot_underfitting_overfitting.py
|
53
|
2668
|
"""
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
|
bsd-3-clause
|
depet/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
324
|
1862
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
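# Illustrative usage sketch (not part of the tests): metric MDS embeds a
# precomputed dissimilarity matrix such as `sim` above into 2-D coordinates.
#   clf = mds.MDS(n_components=2, dissimilarity="precomputed", random_state=0)
#   coords = clf.fit_transform(sim)   # array of shape (n_samples, 2)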
|
bsd-3-clause
|
altairpearl/scikit-learn
|
examples/applications/svm_gui.py
|
124
|
11251
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
try:
import tkinter as Tk
except ImportError:
# Backward compat for Python 2
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
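# Example invocation (shell): the --output flag defined in get_parser() above
# dumps the clicked points in svmlight format once the window is closed.
#
#   python svm_gui.py --output points.svmlight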
|
bsd-3-clause
|
victor-prado/broker-manager
|
environment/lib/python3.5/site-packages/pandas/formats/printing.py
|
7
|
7767
|
"""
printing tools
"""
from pandas.types.inference import is_sequence
from pandas import compat
from pandas.compat import u
from pandas.core.config import get_option
def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str which being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
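# Hand-traced example for ``adjoin`` (strings invented for illustration): each
# column is left-justified to its widest entry plus ``space`` blanks.
#
#   adjoin(2, ['a', 'bb'], ['ccc', 'd'])
#   ->  'a   ccc\nbb  d  '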
def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts]
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
# It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in option "display.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("{%s}")
else:
fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(
next(s), _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt % body
def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for dicts. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt %
(pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs)
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with pprint_sequence, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replaces or adds to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = "'%s'"
else:
fmt = "u'%s'"
result = fmt % as_escaped_unicode(thing)
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
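# Small hand-traced example of ``pprint_thing`` (input invented): nested
# sequences are rendered recursively to a single unicode string, e.g.
#
#   pprint_thing(['a', ('b', 1)])   ->   u'[a, (b, 1)]'
#
# and passing quote_strings=True keeps string quoting in the output.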
|
mit
|
chapmanbe/utah_highschool_airquality
|
windrose/pm25rose.py
|
1
|
20820
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '1.4'
__author__ = 'Lionel Roubeyrie'
__mail__ = '[email protected]'
__license__ = 'CeCILL-B'
import matplotlib
import matplotlib.cm as cm
import numpy as np
from matplotlib.patches import Rectangle
# from matplotlib.ticker import ScalarFormatter, AutoLocator
# from matplotlib.text import Text, FontProperties
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
import matplotlib.pyplot as plt
from pylab import poly_between
RESOLUTION = 100
ZBASE = -1000 #The starting zorder for all drawing, negative to have the grid on top
class WindroseAxes(PolarAxes):
"""
Create a windrose axes
"""
def __init__(self, *args, **kwargs):
"""
See Axes base class for args and kwargs documentation
"""
#Uncomment to have the possibility to change the resolution directly
#when the instance is created
#self.RESOLUTION = kwargs.pop('resolution', 100)
PolarAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.radii_angle = 67.5
self.cla()
def cla(self):
"""
Clear the current axes
"""
PolarAxes.cla(self)
self.theta_angles = np.arange(0, 360, 45)
self.theta_labels = ['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']
self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels, fontsize = 10) # Changed font size for wind direction labels
self._info = {'dir' : list(),
'bins' : list(),
'table' : list()}
self.patches_list = list()
def _colors(self, cmap, n):
'''
Returns a list of n colors based on the colormap cmap
'''
return [cmap(i) for i in np.linspace(0.0, 1.0, n)]
def set_radii_angle(self, **kwargs):
"""
Set the radii labels angle
"""
null = kwargs.pop('labels', None)
angle = kwargs.pop('angle', None)
if angle is None:
angle = self.radii_angle
self.radii_angle = angle
radii = np.linspace(0.1, self.get_rmax(), 6)
radii_labels = [ "%.1f" %r for r in radii ]
radii_labels[0] = "" #Removing label 0
null = self.set_rgrids(radii=radii, labels=radii_labels,
angle=self.radii_angle, **kwargs)
def _update(self):
self.set_rmax(rmax=np.max(np.sum(self._info['table'], axis=0)))
self.set_radii_angle(angle=self.radii_angle)
def legend(self, loc='lower left', **kwargs):
"""
Sets the legend location and its properties.
The location codes are
'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
If none of these are suitable, loc can be a 2-tuple giving x,y
in axes coords, ie,
loc = (0, 1) is left top
loc = (0.5, 0.5) is center, center
and so on. The following kwargs are supported:
isaxes=True # whether this is an axes legend
prop = FontProperties(size='smaller') # the font property
pad = 0.2 # the fractional whitespace inside the legend border
shadow # if True, draw a shadow behind legend
labelsep = 0.005 # the vertical space between the legend entries
handlelen = 0.05 # the length of the legend lines
handletextsep = 0.02 # the space between the legend line and legend text
axespad = 0.02 # the border between the axes and legend edge
"""
def get_handles():
handles = list()
for p in self.patches_list:
if isinstance(p, matplotlib.patches.Polygon) or \
isinstance(p, matplotlib.patches.Rectangle):
color = p.get_facecolor()
elif isinstance(p, matplotlib.lines.Line2D):
color = p.get_color()
else:
raise AttributeError("Can't handle patches")
handles.append(Rectangle((0, 0), 0.2, 0.2,
facecolor=color, edgecolor='black'))
return handles
def get_labels():
labels = np.copy(self._info['bins'])
labels = ["[%.1f : %0.1f]" %(labels[i], labels[i+1]) \
for i in range(len(labels)-1)] ## Legend Labels and units
return labels
null = kwargs.pop('labels', None)
null = kwargs.pop('handles', None)
handles = get_handles()
labels = get_labels()
self.legend_ = matplotlib.legend.Legend(self, handles, labels,
loc, **kwargs)
return self.legend_
def _init_plot(self, dir, var, **kwargs):
"""
Internal method used by all plotting commands
"""
#self.cla()
null = kwargs.pop('zorder', None)
#Init of the bins array if not set
bins = kwargs.pop('bins', None)
if bins is None:
bins = np.array([0, 60, 75, 95, 115]) ## Set the range of color scale for PM25
if isinstance(bins, int):
bins = np.linspace(np.min(var), np.max(var), bins)
bins = np.asarray(bins)
nbins = len(bins)
#Number of sectors
nsector = kwargs.pop('nsector', None)
if nsector is None:
nsector = 16 ## Number of sections
#Sets the colors table based on the colormap or the "colors" argument
colors = kwargs.pop('colors', None)
cmap = kwargs.pop('cmap', None)
if colors is not None:
if isinstance(colors, str):
colors = [colors]*nbins
if isinstance(colors, (tuple, list)):
if len(colors) != nbins:
raise ValueError("colors and bins must have same length")
else:
if cmap is None:
cmap = cm.jet
colors = self._colors(cmap, nbins)
#Building the angles list
angles = np.arange(0, -2*np.pi, -2*np.pi/nsector) + np.pi/2
normed = kwargs.pop('normed', False)
blowto = kwargs.pop('blowto', False)
#Set the global information dictionnary
self._info['dir'], self._info['bins'], self._info['table'] = histogram(dir, var, bins, nsector, normed, blowto)
return bins, nbins, nsector, colors, angles, kwargs
def contour(self, dir, var, **kwargs):
"""
Plot a windrose in linear mode. For each var bin, a line will be
drawn on the axes, a segment between each sector (center to center).
Each line can be formatted (color, width, ...) like with standard plot
pylab command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
patch = self.plot(angles, val, color=colors[i], zorder=zorder,
**kwargs)
self.patches_list.extend(patch)
self._update()
def contourf(self, dir, var, **kwargs):
"""
Plot a windrose in filled mode. For each var bin, a line will be
drawn on the axes, a segment between each sector (center to center).
Each line can be formatted (color, width, ...) like with standard plot
pylab command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
null = kwargs.pop('edgecolor', None)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
xs, ys = poly_between(angles, 0, val)
patch = self.fill(xs, ys, facecolor=colors[i],
edgecolor=colors[i], zorder=zorder, **kwargs)
self.patches_list.extend(patch)
def bar(self, dir, var, **kwargs):
"""
Plot a windrose in bar mode. For each var bin and for each sector,
a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
* opening : float - between 0.0 and 1.0, to control the space between
each sector (1.0 for no space)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = kwargs.pop('opening', None)
if opening is None:
opening = 0.8
dtheta = 2*np.pi/nsector
opening = dtheta*opening
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening/2, offset), opening, val,
facecolor=colors[i],
edgecolor=edgecolor,
zorder=zorder,
**kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def box(self, dir, var, **kwargs):
"""
Plot a windrose in proportional bar mode. For each var bin and for each
sector, a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
and the resulting computed table will be aligned with the cardinals
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
to show where the wind blows to (useful for pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
edgecolor : string - The string color each edge bar will be plotted.
Default : no edgecolor
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = np.linspace(0.0, np.pi/16, nbins)
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening[i]/2, offset), opening[i],
val, facecolor=colors[i], edgecolor=edgecolor,
zorder=zorder, **kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def histogram(dir, var, bins, nsector, normed=False, blowto=False):
"""
Returns an array where, for each sector of wind
(centred on the north), we have the number of times the wind comes with a
particular var (speed, pollutant concentration, ...).
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
* bins : list - list of var category against we're going to compute the table
* nsector : integer - number of sectors
* normed : boolean - The resulting table is normed in percent or not.
* blowto : boolean - Normally a windrose is computed with directions
the wind blows from. If true, the table will be reversed (useful for
a pollutant rose)
"""
if len(var) != len(dir):
raise ValueError("var and dir must have same length")
angle = 360./nsector
dir_bins = np.arange(-angle/2 ,360.+angle, angle, dtype=np.float)
dir_edges = dir_bins.tolist()
dir_edges.pop(-1)
dir_edges[0] = dir_edges.pop(-1)
dir_bins[0] = 0.
var_bins = bins.tolist()
var_bins.append(np.inf)
if blowto:
dir = dir + 180.
dir[dir>=360.] = dir[dir>=360.] - 360
table = histogram2d(x=var, y=dir, bins=[var_bins, dir_bins],
normed=False)[0]
# add the last value to the first to have the table of North winds
table[:,0] = table[:,0] + table[:,-1]
# and remove the last col
table = table[:, :-1]
if normed:
table = table*100/table.sum()
return dir_edges, var_bins, table
def wrcontour(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contour(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrcontourf(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contourf(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbox(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.box(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbar(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.bar(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def clean(dir, var):
'''
Remove masked values in the two arrays: if a direction value is masked,
the corresponding var value is also removed in the cleaning process (and vice-versa)
'''
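# Illustrative sketch (hypothetical masked arrays, not part of this module):
#     wd = np.ma.masked_invalid(np.array([10., np.nan, 200.]))
#     ws = np.ma.masked_invalid(np.array([np.nan, 3., 5.]))
#     wd_ok, ws_ok = clean(wd, ws)   # keeps only index 2 -> ([200.], [5.])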
dirmask = dir.mask==False
varmask = var.mask==False
ind = dirmask*varmask
return dir[ind], var[ind]
if __name__=='__main__':
from pylab import figure, show, setp, random, grid, draw
vv = random(500)*115
dv = random(500)*360
fig = figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
# ax.contourf(dv, vv, bins=np.arange(0,8,1), cmap=cm.hot)
# ax.contour(dv, vv, bins=np.arange(0,8,1), colors='k')
# ax.bar(dv, vv, normed=True, opening=0.8, edgecolor='white')
ax.box(dv, vv, normed=True, colors=('green', 'yellow', 'orange', 'red', 'purple'))
l = ax.legend()
setp(l.get_texts(), fontsize=8)
draw()
#print ax._info
show()
|
apache-2.0
|
jzt5132/scikit-learn
|
sklearn/metrics/cluster/tests/test_unsupervised.py
|
230
|
2823
|
import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
|
bsd-3-clause
|
jonathansick/androcmd
|
scripts/starfish_magphys/plot_magphys_phat_patches.py
|
1
|
6560
|
#!/usr/bin/env python
# encoding: utf-8
"""
Plot MAGPHYS estimates of PHAT fields.
2015-07-16 - Created by Jonathan Sick
"""
import argparse
import os
import numpy as np
import h5py
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.gridspec as gridspec
from palettable.cubehelix import perceptual_rainbow_16
from androcmd.phatpatchfit.pipeline import load_field_patches
from androcmd.phatpatchfit import load_galex_map, setup_galex_axes
def main():
args = parse_args()
base_path = os.path.splitext(args.path)[0]
# plot_mean_age(args.path, base_path + '_mean_age')
plot_estimates_grid(args.path, base_path + '_est_grid')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('path',
help='andromass pixel table with MAGPHYS estimates')
return parser.parse_args()
def plot_mean_age(sed_path, plot_path):
basemap = load_galex_map()
dataset = h5py.File(sed_path, 'r')
t = dataset['estimates']
log_age_M = t['log_age_M']
age_gyr = 10. ** (log_age_M - 9.)
polys = _make_footprint_list()
fig = Figure(figsize=(3.5, 3.5), frameon=False)
canvas = FigureCanvas(fig)
gs = gridspec.GridSpec(1, 2,
left=0.12, right=0.85, bottom=0.15, top=0.95,
wspace=0., hspace=None,
width_ratios=(1, 0.08), height_ratios=None)
ax = setup_galex_axes(fig, gs[0], basemap)
ax_cb = fig.add_subplot(gs[1])
print age_gyr[:, 2].flatten()
cmap = perceptual_rainbow_16.mpl_colormap
norm = mpl.colors.Normalize(vmin=0, vmax=10, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(age_gyr[:, 2].flatten())
poly_args = {'edgecolor': 'k', 'lw': 0.5,
'transform': ax.get_transform('world'),
'closed': True}
for poly, age in zip(polys, age_gyr):
patch = mpl.patches.Polygon(poly,
facecolor=mapper.to_rgba(age[2]),
**poly_args)
ax.add_patch(patch)
cb = fig.colorbar(mapper, cax=ax_cb)
cb.set_label(r'$\langle A~\mathrm{Gyr}^{-1} \rangle$')
ax.coords[0].ticklabels.set_size(8)
ax.coords[1].ticklabels.set_size(8)
gs.tight_layout(fig, pad=1.08, h_pad=None, w_pad=None, rect=None)
canvas.print_figure(plot_path + ".pdf", format="pdf")
def plot_estimates_grid(sed_path, plot_path):
dataset = h5py.File(sed_path, 'r')
fig = Figure(figsize=(6.5, 7), frameon=False)
canvas = FigureCanvas(fig)
gs = gridspec.GridSpec(2, 3,
left=0.04, right=0.96, bottom=0.07, top=0.95,
wspace=0.15, hspace=0.15,
width_ratios=None, height_ratios=None)
gs_settings = dict(height_ratios=(1, 0.05), hspace=0.01)
gs_age = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0, 0],
**gs_settings)
gs_sfr = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[1, 0],
**gs_settings)
gs_Z = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0, 1],
**gs_settings)
gs_mu = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[1, 1],
**gs_settings)
gs_tau = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0, 2],
**gs_settings)
gs_ism = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[1, 2],
**gs_settings)
t = dataset['estimates']
_plot_grid_ax(10. ** (t['log_age_M'] - 9.), fig, gs_age,
vmin=0., vmax=12.,
label=r'$\langle A~\mathrm{Gyr}^{-1} \rangle$')
_plot_grid_ax(t['Z_Zo'], fig, gs_Z,
vmin=-2, vmax=0.3,
label=r'$\log Z/Z_\odot$')
print t.dtype
_plot_grid_ax(scale_sfr(t['SFR_0.1Gyr']), fig, gs_sfr,
vmin=1.0, vmax=1.25,
label=r'$\langle \log \mathrm{SFR}_{100~\mathrm{Myr}} \rangle$')
_plot_grid_ax(t['mu'], fig, gs_mu,
vmin=0., vmax=1,
label=r'$\mu$')
_plot_grid_ax(t['tau_V'], fig, gs_tau,
vmin=0., vmax=2,
label=r'$\tau_V$')
_plot_grid_ax(t['tau_V_ISM'], fig, gs_ism,
vmin=0., vmax=0.5,
label=r'$\tau_\mathrm{ISM}$')
canvas.print_figure(plot_path + ".pdf", format="pdf")
def _plot_grid_ax(data, fig, gs, vmin=0., vmax=1.,
label=None, cb_locator=mpl.ticker.MaxNLocator(4)):
basemap = load_galex_map()
polys = _make_footprint_list()
ax = setup_galex_axes(fig, gs[0], basemap)
ax_cb = fig.add_subplot(gs[1])
print data[:, 2].flatten()
cmap = perceptual_rainbow_16.mpl_colormap
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
mapper = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(data[:, 2].flatten())
poly_args = {'edgecolor': 'k', 'lw': 0.5,
'transform': ax.get_transform('world'),
'closed': True}
for poly, v in zip(polys, data):
patch = mpl.patches.Polygon(poly,
facecolor=mapper.to_rgba(v[2]),
**poly_args)
ax.add_patch(patch)
cb = fig.colorbar(mapper, cax=ax_cb, orientation='horizontal')
if label is not None:
cb.set_label(label, fontsize=9)
cb.ax.tick_params(labelsize=7)
cb.locator = cb_locator
cb.update_ticks()
ax.coords[0].ticklabels.set_visible(False)
ax.coords[1].ticklabels.set_visible(False)
def _make_footprint_list():
"""Make a list of footprints that matches the SED ID order"""
fields = load_field_patches()
return [patch['poly'] for patch in fields]
def scale_sfr(sfr):
fields = load_field_patches()
area_proj = np.atleast_2d(np.array([f['area_proj'] for f in fields])).T
print "area_proj"
print area_proj
area = area_proj / np.cos(77.5 * np.pi / 180.) / 1e3 / 1e3 # kpc^2
print "area"
print area
print "sfr"
print sfr
scaled_sfr = np.log10(10. ** sfr / area * 10. ** 3.)
print "scaled log(sfr)"
print scaled_sfr
return scaled_sfr
if __name__ == '__main__':
main()
|
mit
|
ilyes14/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
|
bsd-3-clause
|
abimannans/scikit-learn
|
sklearn/metrics/ranking.py
|
79
|
25426
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or all are irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
|
bsd-3-clause
|
calispac/digicampipe
|
digicampipe/scripts/plot_eventid_vs_time.py
|
1
|
3149
|
"""
Plot event_id as a function of time for the selected events
Usage:
plot_event_id_vs_time [options] [--] <INPUT>...
Options:
--help Show this help.
--event_id_start=INT minimum event id to plot. If none, no
minimum id is considered.
[Default: none]
--event_id_end=INT maximum event id to plot. If none, no
maximum id is considered.
[Default: none]
--plot=FILE Path of the image to be created.
Use "show" to open an interactive plot instead
of creating a file.
Use "none" to skip the plot.
[Default: show]
"""
from docopt import docopt
import matplotlib.pyplot as plt
import numpy as np
import os
from pandas import to_datetime
from digicampipe.io.event_stream import event_stream
from digicampipe.utils.docopt import convert_text
def entry(files, event_id_start, event_id_end, plot):
events = event_stream(
files,
event_id_range=(int(event_id_start), int(event_id_end))
)
events_id = []
events_ts = []
baselines_mean = []
for i, event in enumerate(events):
tel = event.r0.tels_with_data[0]
clock_ns = event.r0.tel[tel].local_camera_clock
event_id = event.r0.tel[tel].camera_event_number
baseline_mean = np.mean(event.r0.tel[tel].digicam_baseline)
events_ts.append(clock_ns)
events_id.append(event_id)
baselines_mean.append(baseline_mean)
events_ts = np.array(events_ts)
events_id = np.array(events_id)
baselines_mean = np.array(baselines_mean)
order = np.argsort(events_ts)
events_ts = events_ts[order]
events_id = events_id[order]
baselines_mean = baselines_mean[order]
if plot is not None:
print('plotted with respect to t=', to_datetime(events_ts[0]))
fig1 = plt.figure(figsize=(8, 8))
plt.subplot(2, 2, 1)
plt.plot((events_ts-events_ts[0]) * 1e-9, events_id, '.')
plt.xlabel('$t [s]$')
plt.ylabel('event_id')
plt.subplot(2, 2, 2)
plt.hist(np.diff(events_ts), np.arange(150, 400, 4))
plt.xlim(150, 400)
plt.xlabel(r'$\Delta t [ns]$')
plt.ylabel('# of events')
plt.subplot(2, 1, 2)
plt.plot((events_ts-events_ts[0]) * 1e-9, baselines_mean, '.')
plt.xlabel('$t [s]$')
plt.ylabel('mean baseline [LSB]')
plt.tight_layout()
if plot == "show":
plt.show()
else:
output_path = os.path.dirname(plot)
if not os.path.exists(output_path):
os.makedirs(output_path)
plt.savefig(plot)
plt.close(fig1)
return
if __name__ == '__main__':
args = docopt(__doc__)
files = args['<INPUT>']
event_id_start = args['--event_id_start']
event_id_end = args['--event_id_end']
plot = convert_text(args['--plot'])
entry(files, event_id_start, event_id_end, plot)
|
gpl-3.0
|
simonsfoundation/CaImAn
|
sandbox/demo_caiman_basic_dendrites.py
|
2
|
8519
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stripped demo for running the CNMF source extraction algorithm with CaImAn and
evaluating the components. The analysis can be run either in the whole FOV
or in patches.
For a complete pipeline (including motion correction) check demo_pipeline.py
Data courtesy of W. Yang, D. Peterka and R. Yuste (Columbia University)
This demo is designed to be run under spyder or jupyter; its plotting functions
are tailored for that environment.
@authors: @agiovann and @epnev
"""
from __future__ import print_function
from builtins import range
import cv2
try:
cv2.setNumThreads(0)
except:
pass
try:
if __IPYTHON__:
print("Detected iPython")
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
except NameError:
pass
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
import caiman as cm
from caiman.components_evaluation import estimate_components_quality_auto
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.paths import caiman_datadir
#%% start a cluster
try:
dview.terminate()
except:
pass
c, dview, n_processes =\
cm.cluster.setup_cluster(backend='local', n_processes=None,
single_thread=False)
#%% save files to be processed
# This datafile is distributed with Caiman
#fnames = ['/Users/agiovann/Dropbox (Simons Foundation)/ImagingData/DataCalciumImaging/purkinje_out/corrected_movies/M_FLUO_1.tif']
#fname_new = '/Users/agiovann/example_movies_ALL/memmap__d1_131_d2_131_d3_1_order_C_frames_2991_.mmap'
#fnames = ['/Users/agiovann/example_movies_ALL/2014-04-05-003.tif']
#fnames = ['/Users/agiovann/example_movies_ALL/quietBlock_1_ds_5.hdf5']
#fnames = ['/mnt/ceph/neuro/Tolias/quietBlock_2_ds_2_2.hdf5']
#fnames=['/Users/agiovann/example_movies_ALL/2014-04-05-003_sparse.tif']
fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC.hdf5']
fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC_ds_4_400_400.hdf5']
#fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC_ds_4_400_400_sparse.hdf5']
fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC_crop.tif']
fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC_ds_4_400_400_sparse_-2.hdf5']
fnames = ['/mnt/ceph/users/agiovann/ImagingData/Basu/495D2G_MC_ds_4_400_400_sparse_1.hdf5']
#%%
if False:
m = cm.load(fnames)
img = m.local_correlations_movie(file_name=fnames[0],dview=dview,swap_dim=False, window=100, order_mean = -2).astype(np.float32)
# img[np.isnan(img)]=0
# img[img>1]=1
# img=img*(img>0)
#%%
# location of dataset (can actually be a list of filed to be concatenated)
add_to_movie = 0
# if minimum is negative subtract to make the data non-negative
base_name = 'Yr'
fname_new = cm.save_memmap(fnames, dview=dview, base_name=base_name,
add_to_movie=add_to_movie, order='C')
#%% LOAD MEMORY MAPPABLE FILE
Yr, dims, T = cm.load_memmap(fname_new)
d1, d2 = dims
images = np.reshape(Yr.T, [T] + list(dims), order='F')
#%% play movie, press q to quit
play_movie = True
if play_movie:
cm.movie(images).play(fr=50, magnification=2, gain=2)
#%% correlation image. From here infer neuron size and density
Cn = cm.movie(images).local_correlations(swap_dim=False)
plt.cla()
plt.imshow(Cn, cmap='gray')
plt.title('Correlation Image')
#%% set up some parameters
is_patches = True # flag for processing in patches or not
if is_patches: # PROCESS IN PATCHES AND THEN COMBINE
rf = [50,50] # half size of each patch
stride = [20,20] # overlap between patches
K = 20 # number of components in each patch
else: # PROCESS THE WHOLE FOV AT ONCE
rf = None # setting these parameters to None
stride = None # will run CNMF on the whole FOV
K = 25 # number of neurons expected (in the whole FOV)
gSig = [6, 6] # expected half size of neurons
merge_thresh = 0.80 # merging threshold, max correlation allowed
p = 1 # order of the autoregressive system
gnb = 1 # global background order
border_pix = 5
#%% Now RUN CNMF
cnm = cnmf.CNMF(n_processes, method_init='sparse_nmf', k=K, gSig=gSig,
merge_thresh=merge_thresh, p=p, dview=dview, gnb=gnb,
rf=rf, stride=stride, rolling_sum=False, alpha_snmf = 0.1, border_pix=border_pix,
only_init_patch=False)
cnm = cnm.fit(images)
#%% plot contour plots of components
plt.figure()
crd = cm.utils.visualization.plot_contours(cnm.A, Cn, thr=0.9)
plt.title('Contour plots of components')
#%%
cm.movie(np.reshape(Yr-cnm.A.dot(cnm.C) - cnm.b.dot(cnm.f), dims + (-1,), order='F').transpose(2, 0, 1)).play(magnification=3, gain=10.)
#%% visualize selected components
cm.utils.visualization.view_patches_bar(Yr, cnm.A.tocsc()[:, :],
cnm.C[:, :], cnm.b, cnm.f,
dims[0], dims[1],
YrA=cnm.YrA[:, :], img=Cn)
#%%
A_in, C_in, b_in, f_in = cnm.A[:,:], cnm.C[:], cnm.b, cnm.f
cnm2 = cnmf.CNMF(n_processes=1, k=A_in.shape[-1], gSig=gSig, p=p, dview=dview,
merge_thresh=merge_thresh, Ain=A_in, Cin=C_in, b_in=b_in,
f_in=f_in, rf=None, stride=None, gnb=gnb,
method_deconvolution='oasis', check_nan=True,border_pix=border_pix)
cnm2 = cnm2.fit(images)
#%% COMPONENT EVALUATION
# the components are evaluated in three ways:
# a) the shape of each component must be correlated with the data
# b) a minimum peak SNR is required over the length of a transient
# c) each shape passes a CNN based classifier (this will pick up only neurons
# and filter out active processes)
fr = 3 # approximate frame rate of data
decay_time = 1 # length of transient
min_SNR = 2.5 # peak SNR for accepted components (if above this, accept)
rval_thr = 0.90 # space correlation threshold (if above this, accept)
use_cnn = False # use the CNN classifier
min_cnn_thr = 0.95 # if cnn classifier predicts below this value, reject
idx_components, idx_components_bad, SNR_comp, r_values, cnn_preds = \
estimate_components_quality_auto(images, cnm2.A, cnm2.C, cnm2.b, cnm2.f,
cnm2.YrA, fr, decay_time, gSig, dims,
dview=dview, min_SNR=min_SNR,
r_values_min=rval_thr, use_cnn=use_cnn,
thresh_cnn_min=min_cnn_thr, thresh_cnn_lowest=0)
#%%
cm.utils.visualization.plot_contours(cnm2.A, Cn, thr=0.9)
#%% visualize selected and rejected components
plt.figure()
plt.subplot(1, 2, 1)
cm.utils.visualization.plot_contours(cnm2.A[:, idx_components], Cn, thr=0.9)
plt.title('Selected components')
plt.subplot(1, 2, 2)
plt.title('Discarded components')
cm.utils.visualization.plot_contours(cnm2.A[:, idx_components_bad], Cn, thr=0.9)
#%%
plt.figure()
crd = cm.utils.visualization.plot_contours(cnm2.A.tocsc()[:,idx_components], Cn, thr=0.9)
plt.title('Contour plots of components')
#%% visualize selected components
cm.utils.visualization.view_patches_bar(Yr, cnm2.A.tocsc()[:, idx_components],
cnm2.C[idx_components, :], cnm2.b, cnm2.f,
dims[0], dims[1],
YrA=cnm2.YrA[idx_components, :], img=Cn)
#%% visualize selected components bad
cm.utils.visualization.view_patches_bar(Yr, cnm2.A.tocsc()[:, idx_components_bad],
cnm2.C[idx_components_bad, :], cnm2.b, cnm2.f,
dims[0], dims[1],
YrA=cnm2.YrA[idx_components_bad, :], img=Cn)
#%%
cm.movie(np.reshape(cnm2.A.tocsc()[:, idx_components_bad].dot(
cnm2.C[idx_components_bad]), dims + (-1,), order='F').transpose(2, 0, 1)).play(magnification=3, gain=10.)
#%%
cm.movie(np.reshape(cnm2.A.tocsc()[:, idx_components].dot(
cnm2.C[idx_components]), dims + (-1,), order='F').transpose(2, 0, 1)).play(magnification=3, gain=10.)
#%%
#%%
cm.movie(np.reshape(Yr-cnm2.A.dot(cnm2.C) - cnm2.b.dot(cnm2.f), dims + (-1,), order='F').transpose(2, 0, 1)).play(magnification=3, gain=10.)
#%% STOP CLUSTER and clean up log files
cm.stop_server()
log_files = glob.glob('Yr*_LOG_*')
for log_file in log_files:
os.remove(log_file)
|
gpl-2.0
|
LabMagUBO/StoneX
|
testing/Xmpl_SW.py
|
1
|
3319
|
#!/opt/local/bin/python
# -*- coding: utf-8 -*-
"""
Main program file.
Example of Stoner Wohlfarth.
StoneX V1.0 compatible
"""
################################################################################
# MODULES
################################################################################
## Path to the module
module_path = '/Users/zorg/These/Documents/Programmes/Python_Modules/'
## Define module path
import sys
sys.path.append(module_path)
# Importing StoneX module
import StoneX
import numpy as np
from matplotlib import pylab as pl
## Activate logging (cleaning the log on the first logger)
logger = StoneX.init_log(__name__, console_level='debug', file_level='debug', mode='w')
logger.info("Program version {}".format(StoneX.__version__))
################################################################################
# VSM CREATION
################################################################################
# First, lets create the VSM
vsm = StoneX.VSM()
# Set the vsm parameters
vsm.H = (0.5/StoneX.mu_0, 0.05/StoneX.mu_0, 0.15/StoneX.mu_0, 0.005/StoneX.mu_0, 'si')
vsm.phi = (0, 91, 15, 'deg')
vsm.T = (300, 1001, 1000, 'K')
# Plotting
vsm.plot_cycles = True
vsm.plot_azimuthal = True
vsm.plot_energyPath = False
vsm.plot_energyLandscape = False #Takes a lot of time
vsm.plot_T = False
# Export
vsm.export_data = True
# Displaying parameters
logger.info(vsm)
################################################################################
# SAMPLE CREATION
################################################################################
# First, we create a specific domain, changing if necessary the parameters
# Models available : Stoner_Wohlfarth, Meiklejohn_Bean, Garcia_Otero, Franco_Conde, Rotatable_AF, Double_MacroSpin
domain = StoneX.create_domain(StoneX.Stoner_Wohlfarth, 'Xmpl_SW')
# Setting the temperature
domain.T = 0
# Theta step
domain.theta = (0.1, 'deg')
# Setting the physical parameters
domain.Ms = 1.4e6 # Magnetization in A/m
r = 2e-9 # radius of the particle, in meters
domain.V_f = 4/3*np.pi * r**3
domain.K_f = 1e5 # Uniaxial anisotropy, in J/m**3
domain.gamma_f = 0 # Uniaxial anisotropy direction, in radians
domain.K_bq = 0
domain.gamma_bq = 0
domain.K_iso = 0
print(domain)
## Then we create a sample based on the domain
# Distribution parameters
N = 1000
xMin = np.log(0.03)/np.log(10)
xMax = np.log(5) / np.log(10)
nx = 10
mu = 2
sigma = 10
# LogNormal distribution
R = np.logspace(xMin, xMax, nx)
X, m, s = StoneX.lognormale(R, mu, sigma)
logger.info("""Distribution de probabilité
Normale : mu = {}, sigma = {}
Log-normale : m = {}, s = {}""".format(mu, sigma, m, s))
# Creating the sample
Density = N * X
sample = StoneX.create_sample(domain, Density)
if True:
pl.plot(R, np.around(X * N), '-ro')
pl.savefig('{}/distrib.pdf'.format(domain.name), dpi=100)
for i, radius in enumerate(R):
sample.domains[i].V_f = 4/3 * np.pi * (radius * 1e-9)**3
#print(sample.domains[i].V_f)
################################################################################
# MEASUREMENTS
################################################################################
# We can measure the sample or one domain only
vsm.load(domain)
vsm.measure()
# END OF PROGRAM
|
gpl-3.0
|
lodemo/CATANA
|
src/face_recognition/facenet/tmp/deepdream.py
|
4
|
10431
|
# boilerplate code
import numpy as np
from functools import partial
import PIL.Image
import tensorflow as tf
import matplotlib.pyplot as plt
import urllib2
import os
import zipfile
def main():
# download pre-trained model by running the command below in a shell
# wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip inception5h.zip
url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
data_dir = '../data/'
model_name = os.path.split(url)[-1]
local_zip_file = os.path.join(data_dir, model_name)
if not os.path.exists(local_zip_file):
# Download
model_url = urllib2.urlopen(url)
with open(local_zip_file, 'wb') as output:
output.write(model_url.read())
# Extract
with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
zip_ref.extractall(data_dir)
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(os.path.join(data_dir, model_fn), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
#pylint: disable=unused-variable
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add() #pylint: disable=maybe-no-member
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def rename_nodes(graph_def, rename_func):
res_def = tf.GraphDef()
for n0 in graph_def.node:
n = res_def.node.add() #pylint: disable=maybe-no-member
n.MergeFrom(n0)
n.name = rename_func(n.name)
for i, s in enumerate(n.input):
n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])
return res_def
def showarray(a):
a = np.uint8(np.clip(a, 0, 1)*255)
plt.imshow(a)
plt.show()
def visstd(a, s=0.1):
'''Normalize the image range for visualization'''
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def T(layer):
'''Helper for getting layer output tensor'''
return graph.get_tensor_by_name("import/%s:0"%layer)
def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for _ in range(iter_n):
g, _ = sess.run([t_grad, t_score], {t_input:img})
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
showarray(visstd(img))
def tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See "resize" function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
img = tf.expand_dims(img, 0)
return tf.image.resize_bilinear(img, size)[0,:,:,:]
resize = tffunc(np.float32, np.int32)(resize)
def calc_grad_tiled(img, t_grad, tile_size=512):
'''Compute the value of tensor t_grad over the image in a tiled way.
Random shifts are applied to the image to blur tile boundaries over
multiple iterations.'''
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
grad = np.zeros_like(img)
for y in range(0, max(h-sz//2, sz),sz):
for x in range(0, max(w-sz//2, sz),sz):
sub = img_shift[y:y+sz,x:x+sz]
g = sess.run(t_grad, {t_input:sub})
grad[y:y+sz,x:x+sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def render_multiscale(t_obj, img0=img_noise, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for _ in range(iter_n):
g = calc_grad_tiled(img, t_grad)
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
showarray(visstd(img))
def lap_split(img):
'''Split the image into lo and hi frequency components'''
with tf.name_scope('split'):
lo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')
lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])
hi = img-lo2
return lo, hi
def lap_split_n(img, n):
'''Build Laplacian pyramid with n splits'''
levels = []
for _ in range(n):
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def lap_merge(levels):
'''Merge Laplacian pyramid'''
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi
return img
def normalize_std(img, eps=1e-10):
'''Normalize image by making its standard deviation = 1.0'''
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img/tf.maximum(std, eps)
def lap_normalize(img, scale_n=4):
'''Perform the Laplacian pyramid normalization.'''
img = tf.expand_dims(img,0)
tlevels = lap_split_n(img, scale_n)
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0,:,:,:]
def render_lapnorm(t_obj, img0=img_noise, visfunc=visstd,
iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# build the laplacian normalization graph
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for _ in range(iter_n):
g = calc_grad_tiled(img, t_grad)
g = lap_norm_func(g)
img += g*step
showarray(visfunc(img))
def render_deepdream(t_obj, img0=img_noise,
iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# split the image into a number of octaves
img = img0
octaves = []
for _ in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw)/octave_scale))
hi = img-resize(lo, hw)
img = lo
octaves.append(hi)
# generate details octave by octave
for octave in range(octave_n):
if octave>0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2])+hi
for _ in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g*(step / (np.abs(g).mean()+1e-7))
showarray(img/255.0)
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139 # picking some feature channel to visualize
render_naive(T(layer)[:,:,:,channel])
render_multiscale(T(layer)[:,:,:,channel])
k = np.float32([1,4,6,4,1])
k = np.outer(k, k)
k5x5 = k[:,:,None,None]/k.sum()*np.eye(3, dtype=np.float32)
render_lapnorm(T(layer)[:,:,:,channel])
render_lapnorm(T(layer)[:,:,:,65])
render_lapnorm(T('mixed3b_1x1_pre_relu')[:,:,:,101])
render_lapnorm(T(layer)[:,:,:,65]+T(layer)[:,:,:,139], octave_n=4)
img0 = PIL.Image.open('pilatus800.jpg')
img0 = np.float32(img0)
showarray(img0/255.0)
render_deepdream(tf.square(T('mixed4c')), img0)
render_deepdream(T(layer)[:,:,:,139], img0)
if __name__ == '__main__':
main()
|
mit
|
darylsew/audiolearn
|
graph/recorder.py
|
1
|
3654
|
import matplotlib
matplotlib.use('TkAgg') # <-- THIS MAKES IT FAST!
import numpy
import scipy
import struct
import pyaudio
import threading
import pylab
import struct
class SwhRecorder:
"""Simple, cross-platform class to record from the microphone."""
def __init__(self):
"""minimal garb is executed when class is loaded."""
self.RATE=48100
self.BUFFERSIZE=2**12 #1024 is a good buffer size
self.secToRecord=.1
self.threadsDieNow=False
self.newAudio=False
def setup(self):
"""initialize sound card."""
#TODO - windows detection vs. alsa or something for linux
#TODO - try/except for sound card selection/initiation
self.buffersToRecord=int(self.RATE*self.secToRecord/self.BUFFERSIZE)
if self.buffersToRecord==0: self.buffersToRecord=1
self.samplesToRecord=int(self.BUFFERSIZE*self.buffersToRecord)
self.chunksToRecord=int(self.samplesToRecord/self.BUFFERSIZE)
self.secPerPoint=1.0/self.RATE
self.p = pyaudio.PyAudio()
self.inStream = self.p.open(format=pyaudio.paInt16,channels=1,rate=self.RATE,input=True,frames_per_buffer=self.BUFFERSIZE)
self.xsBuffer=numpy.arange(self.BUFFERSIZE)*self.secPerPoint
self.xs=numpy.arange(self.chunksToRecord*self.BUFFERSIZE)*self.secPerPoint
self.audio=numpy.empty((self.chunksToRecord*self.BUFFERSIZE),dtype=numpy.int16)
def close(self):
"""cleanly back out and release sound card."""
self.p.close(self.inStream)
### RECORDING AUDIO ###
def getAudio(self):
"""get a single buffer size worth of audio."""
audioString=self.inStream.read(self.BUFFERSIZE)
return numpy.fromstring(audioString,dtype=numpy.int16)
def record(self,forever=True):
"""record secToRecord seconds of audio."""
while True:
if self.threadsDieNow: break
for i in range(self.chunksToRecord):
self.audio[i*self.BUFFERSIZE:(i+1)*self.BUFFERSIZE]=self.getAudio()
self.newAudio=True
if forever==False: break
def continuousStart(self):
"""CALL THIS to start running forever."""
self.t = threading.Thread(target=self.record)
self.t.start()
def continuousEnd(self):
"""shut down continuous recording."""
self.threadsDieNow=True
### MATH ###
def downsample(self,data,mult):
"""Given 1D data, return the binned average."""
overhang=len(data)%mult
if overhang: data=data[:-overhang]
data=numpy.reshape(data,(len(data)/mult,mult))
data=numpy.average(data,1)
return data
def fft(self,data=None,trimBy=10,logScale=False,divBy=100):
if data is None:
data=self.audio.flatten()
left,right=numpy.split(numpy.abs(numpy.fft.fft(data)),2)
ys=numpy.add(left,right[::-1])
if logScale:
ys=numpy.multiply(20,numpy.log10(ys))
xs=numpy.arange(self.BUFFERSIZE/2,dtype=float)
if trimBy:
i=int((self.BUFFERSIZE/2)/trimBy)
ys=ys[:i]
xs=xs[:i]*self.RATE/self.BUFFERSIZE
if divBy:
ys=ys/float(divBy)
return xs,ys
### VISUALIZATION ###
def plotAudio(self):
"""open a matplotlib popup window showing audio data."""
pylab.plot(self.audio.flatten())
pylab.show()
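# --- A minimal usage sketch (not part of the original class; the microphone availability,
# --- timing, and plotting choices below are assumptions for illustration only) ---
if __name__ == "__main__":
    import time
    SR = SwhRecorder()
    SR.setup()                 # open the sound card with the defaults above
    SR.continuousStart()       # start the background recording thread
    try:
        time.sleep(1)          # give the thread time to fill at least one chunk
        if SR.newAudio:
            xs, ys = SR.fft()  # frequency axis and spectrum of the latest audio
            pylab.plot(xs, ys)
            pylab.show()
            SR.newAudio = False
    finally:
        SR.continuousEnd()     # ask the recording thread to stop
        SR.close()             # release the sound card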
|
mit
|
jay-johnson/redten-python
|
bins/predict.py
|
1
|
9252
|
#!/usr/bin/env python
import sys, urllib, urllib2, re, logging, json, uuid, ast, datetime, os, requests, time, collections
from redten.shellprinting import lg, good, boom, mark, anmt, info
from redten.redten_client import RedTenClient, ppj
"""
More documentation and samples:
- Predictions with the IRIS dataset: https://github.com/jay-johnson/sci-pype/blob/master/red10/Red10-IRIS-Predictions.ipynb
- Forecast: https://github.com/jay-johnson/sci-pype/blob/master/red10/Red10-SPY-Multi-Model-Price-Forecast.ipynb
"""
# Login to red10
rt = RedTenClient()
csv_file = "/opt/work/data/src/iris.csv"
rloc = ""
sloc = ""
ds_name = "iris"
title = "IRIS Predictions - " + str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))
desc = "This is a Description for - " + str(title)
# What column has the labeled targets as integers? (added-manually to the dataset)
target_column_name = "ResultLabel"
# possible values in the Target Column
target_column_values = [ "Iris-setosa", "Iris-versicolor", "Iris-virginica" ]
# What columns can the algorithms use for training and learning?
feature_column_names = [ "SepalLength", "SepalWidth", "PetalLength", "PetalWidth", "ResultTargetValue" ]
# What column holds string labels for the Target Column?
label_column_name = "ResultLabel"
ignore_features = [ # Prune non-int/float columns as needed:
target_column_name
]
if target_column_name != label_column_name:
ignore_features.append(label_column_name)
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
train_xgb = {
"learning_rate" : 0.20,
"num_estimators" : 50,
"sub_sample" : 0.20,
"col_sample_by_tree" : 0.90,
"col_sample_by_level" : 1.0,
"objective" : "reg:linear",
"max_depth" : 3,
"max_delta_step" : 0,
"min_child_weight" : 1,
"reg_alpha" : 0,
"reg_lambda" : 1,
"base_score" : 0.6,
"gamma" : 0,
"seed" : 42,
"silent" : True
}
label_column_name = target_column_name
test_ratio = 0.1
sample_filter_rules = {}
algo_name = "xgb-regressor"
# allow the target dataset to load from the env for automation with docker
if csv_file == "" and sloc == "" and rloc == "":
csv_file = str(os.getenv("ENV_REDTEN_CSV_FILE", "")).strip().lstrip()
post_data = {
"predict_this_data" : predict_row,
"title" : title,
"desc" : desc,
"ds_name" : ds_name,
"feature_column_names" : feature_column_names,
"ignore_features" : ignore_features,
"csv_file" : csv_file,
"rloc" : rloc,
"sloc" : sloc,
"algo_name" : algo_name,
"target_column_values" : target_column_values,
"target_column_name" : target_column_name,
"label_column_name" : label_column_name,
"prediction_type" : "Predict",
"ml_type" : "Predict with Filter",
"train" : train_xgb
}
"""
Launch the Job
"""
anmt("Launching Job")
job_id = None
job_data = {}
job_report = {}
job_res = {}
job_response = rt.run_job(post_data=post_data)
if job_response["status"] != "valid":
boom("Predict job failed with error=" + str(job_response["status"]))
sys.exit(1)
else:
if "id" not in job_response["data"]:
boom("Failed to create new Predict job")
sys.exit(1)
else:
job_id = job_response["data"]["id"]
job_status = job_response["data"]["status"]
lg("Started Predict job=" + str(job_id) + " with current status=" + str(job_status))
# end of if job was valid or not
lg("Started Predict=" + str(ds_name) + " job=" + str(job_id), 6)
"""
Wait on the Job to finish
"""
if job_id == None:
boom("Failed to start a new job")
sys.exit(1)
else:
lg("Waiting on results", 6)
job_res = rt.wait_on_job(job_id)
if job_res["status"] != "SUCCESS":
boom("Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"]))
sys.exit(1)
else:
job_data = job_res["record"]
anmt("Job Report:")
lg(ppj(job_data), 5)
# end of waiting
"""
Get Job Analysis
"""
job_report = {}
if job_id == None:
boom("Failed to start a new job")
sys.exit(1)
else:
# Get the analysis, but do not auto-show the plots
job_report = rt.get_job_analysis(job_id, show_plots=False)
if len(job_report) == 0:
boom("Job=" + str(job_id) + " failed")
sys.exit(1)
else:
lg("")
# if the job failed
# end of get job analysis
"""
Build Prediction Results
"""
lg("Building Prediction=" + str(ds_name) + " Results for job=" + str(job_id), 6)
# Build the prediction accuracy dictionary from the analysis
# and show the Predict dataframes
acc_results = rt.build_prediction_results(job_report)
# for all columns in the accuracy dictionary:
for col in acc_results:
col_node = acc_results[col]
lg("Column=" + str(col) + " accuracy=" + str(col_node["accuracy"]) + " mse=" + str(col_node["mse"]) + " num_predictions=" + str(len(col_node["predictions_df"].index)))
# show the predictions
lg(col_node["predictions_df"].head(5))
lg("")
# end of showing prediction results
anmt("Building a new prediction from pre-trained, cached models")
predict_row = {
"SepalLength" : 5.4,
"SepalWidth" : 3.4,
"PetalLength" : 1.7,
"PetalWidth" : 0.2,
"ResultTargetValue" : 0
}
post_data = {
"use_cached_job_id" : job_id,
"predict_this_data" : predict_row,
"title" : title,
"desc" : desc,
"ds_name" : ds_name,
"feature_column_names" : feature_column_names,
"ignore_features" : ignore_features,
"csv_file" : csv_file,
"rloc" : rloc,
"sloc" : sloc,
"algo_name" : algo_name,
"target_column_values" : target_column_values,
"target_column_name" : target_column_name,
"label_column_name" : label_column_name,
"prediction_type" : "Predict",
"ml_type" : "Predict with Filter",
"user_id" : 2,
"train" : train_xgb,
"max_features" : 10,
"version" : 1
}
job_data = {}
job_report = {}
job_res = {}
job_response = rt.run_job(post_data=post_data)
if job_response["status"] != "valid":
boom("Predict job failed with error=" + str(job_response["status"]))
sys.exit(1)
else:
if "id" not in job_response["data"]:
boom("Failed to create new Predict job")
sys.exit(1)
else:
job_id = job_response["data"]["id"]
job_status = job_response["data"]["status"]
lg("Started Predict job=" + str(job_id) + " with current status=" + str(job_status))
# end of if job was valid or not
lg("Started Predict=" + str(ds_name) + " job=" + str(job_id), 6)
"""
Wait on the New Prediction Job to finish
"""
if job_id == None:
boom("Failed to start a new Prediction job")
else:
lg("Waiting on new Prediction results", 6)
job_res = rt.wait_on_job(job_id)
if job_res["status"] != "SUCCESS":
boom("New Prediction Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"]))
else:
job_data = job_res["record"]
anmt("New Prediction Job Report:")
lg(ppj(job_data), 5)
# end of waiting
"""
Get New Prediction Job Analysis
"""
job_report = {}
if job_id == None:
boom("Failed to start a new job")
else:
# Get the analysis, but do not auto-show the plots
job_report = rt.get_job_analysis(job_id, show_plots=False)
if len(job_report) == 0:
boom("Job=" + str(job_id) + " failed")
sys.exit(1)
else:
lg("")
# if the job failed
# end of get job analysis
"""
Build New Prediction Results
"""
lg("Building New Prediction=" + str(ds_name) + " Results for job=" + str(job_id), 6)
# Build the prediction accuracy dictionary from the analysis
# and show the Predict dataframes
acc_results = rt.build_prediction_results(job_report)
# for all columns in the accuracy dictionary:
for col in acc_results:
col_node = acc_results[col]
lg("Column=" + str(col) + " accuracy=" + str(col_node["accuracy"]) + " mse=" + str(col_node["mse"]) + " num_predictions=" + str(len(col_node["predictions_df"].index)))
# show the predictions
lg(col_node["predictions_df"].head(5))
lg("")
# end of showing prediction results
"""
Get New Prediction's Analysis Images
"""
lg("Getting Predict=" + str(ds_name) + " Analysis Images for job=" + str(job_id), 6)
# unless matplotlib is installed this will fail showing plots:
job_res = rt.get_job_analysis(job_id, show_plots=False)
sys.exit(0)
|
apache-2.0
|
shijx12/DeepSim
|
lib/roi_data_layer/minibatch.py
|
5
|
8725
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
import numpy as np
import numpy.random as npr
import cv2
import os
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
# <<<< obsolete
from ..utils.blob import prep_im_for_blob, im_list_to_blob
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image)
# Get the input image blob, formatted for caffe
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
blobs = {'data': im_blob}
if cfg.TRAIN.HAS_RPN:
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds] \
if 'gt_ishard' in roidb[0] else np.zeros(gt_inds.size, dtype=int)
# blobs['gt_ishard'] = roidb[0]['gt_ishard'][gt_inds]
blobs['dontcare_areas'] = roidb[0]['dontcare_areas'] * im_scales[0] \
if 'dontcare_areas' in roidb[0] else np.zeros([0, 4], dtype=float)
blobs['im_info'] = np.array(
[[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],
dtype=np.float32)
blobs['im_name'] = os.path.basename(roidb[0]['image'])
else: # not using RPN
# Now, build the region of interest and label blobs
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0), dtype=np.float32)
bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32)
bbox_inside_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32)
# all_overlaps = []
for im_i in xrange(num_images):
labels, overlaps, im_rois, bbox_targets, bbox_inside_weights \
= _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image,
num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels, bbox targets, and bbox loss blobs
labels_blob = np.hstack((labels_blob, labels))
bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets))
bbox_inside_blob = np.vstack((bbox_inside_blob, bbox_inside_weights))
# all_overlaps = np.hstack((all_overlaps, overlaps))
# For debug visualizations
# _vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps)
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
if cfg.TRAIN.BBOX_REG:
blobs['bbox_targets'] = bbox_targets_blob
blobs['bbox_inside_weights'] = bbox_inside_blob
blobs['bbox_outside_weights'] = \
np.array(bbox_inside_blob > 0).astype(np.float32)
return blobs
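# Worked example of the sampling arithmetic above (the config values are illustrative
# assumptions, not read from any particular cfg): with cfg.TRAIN.BATCH_SIZE = 128,
# num_images = 2 and cfg.TRAIN.FG_FRACTION = 0.25,
#   rois_per_image    = 128 / 2 = 64
#   fg_rois_per_image = round(0.25 * 64) = 16
# so _sample_rois() keeps at most 16 foreground RoIs per image and fills the remaining
# slots with background RoIs.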
def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# label = class RoI has max overlap with
labels = roidb['max_classes']
overlaps = roidb['max_overlaps']
rois = roidb['boxes']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &
(overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
overlaps = overlaps[keep_inds]
rois = rois[keep_inds]
bbox_targets, bbox_inside_weights = _get_bbox_regression_labels(
roidb['bbox_targets'][keep_inds, :], num_classes)
return labels, overlaps, rois, bbox_targets, bbox_inside_weights
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
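# Small worked example of the 4-of-4*K expansion above (values are illustrative): with
# num_classes = 3 and one row whose class is 2 with targets (tx, ty, tw, th),
# bbox_targets has shape (1, 12) and only columns 8:12 are non-zero, while
# bbox_inside_weights carries cfg.TRAIN.BBOX_INSIDE_WEIGHTS in those same columns.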
def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps):
"""Visualize a mini-batch for debugging."""
import matplotlib.pyplot as plt
for i in xrange(rois_blob.shape[0]):
rois = rois_blob[i, :]
im_ind = rois[0]
roi = rois[1:]
im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy()
im += cfg.PIXEL_MEANS
im = im[:, :, (2, 1, 0)]
im = im.astype(np.uint8)
cls = labels_blob[i]
plt.imshow(im)
print 'class: ', cls, ' overlap: ', overlaps[i]
plt.gca().add_patch(
plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
|
mit
|
vighneshbirodkar/scikit-image
|
skimage/future/graph/rag.py
|
5
|
19594
|
import networkx as nx
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import ndimage as ndi
from scipy import sparse
import math
from ... import measure, segmentation, util, color
from matplotlib import colors, cm
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
def _edge_generator_from_csr(csr_matrix):
"""Yield weighted edge triples for use by NetworkX from a CSR matrix.
This function is a straight rewrite of
`networkx.convert_matrix._csr_gen_triples`. Since that is a private
function, it is safer to include our own here.
Parameters
----------
csr_matrix : scipy.sparse.csr_matrix
The input matrix. An edge (i, j, w) will be yielded if there is a
data value for coordinates (i, j) in the matrix, even if that value
is 0.
Yields
------
i, j, w : (int, int, float) tuples
Each value `w` in the matrix along with its coordinates (i, j).
Examples
--------
>>> dense = np.eye(2, dtype=np.float)
>>> csr = sparse.csr_matrix(dense)
>>> edges = _edge_generator_from_csr(csr)
>>> list(edges)
[(0, 0, 1.0), (1, 1, 1.0)]
"""
nrows = csr_matrix.shape[0]
values = csr_matrix.data
indptr = csr_matrix.indptr
col_indices = csr_matrix.indices
for i in range(nrows):
for j in range(indptr[i], indptr[i + 1]):
yield i, col_indices[j], values[j]
def min_weight(graph, src, dst, n):
"""Callback to handle merging nodes by choosing minimum weight.
Returns a dictionary with `"weight"` set as either the weight between
(`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when
both exist.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
The vertices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
data : dict
A dict with the `"weight"` attribute set to the weight between
(`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when
both exist.
"""
# cover the cases where n only has edge to either `src` or `dst`
default = {'weight': np.inf}
w1 = graph[n].get(src, default)['weight']
w2 = graph[n].get(dst, default)['weight']
return {'weight': min(w1, w2)}
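# Illustration of the callback above (hypothetical weights, not from any dataset): if the
# graph has edges (n, src) with weight 2.0 and (n, dst) with weight 5.0, the merged edge
# gets {'weight': 2.0}; if only one of the two edges exists its weight is kept, since the
# missing one defaults to {'weight': np.inf}.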
def _add_edge_filter(values, graph):
"""Create edge in `graph` between central element of `values` and the rest.
Add an edge between the middle element in `values` and
all other elements of `values` into `graph`. ``values[len(values) // 2]``
is expected to be the central value of the footprint used.
Parameters
----------
values : array
The array to process.
graph : RAG
The graph to add edges in.
Returns
-------
0 : float
Always returns 0. The return value is required so that `generic_filter`
can put it in the output array, but it is ignored by this filter.
"""
values = values.astype(int)
center = values[len(values) // 2]
for value in values:
if value != center and not graph.has_edge(center, value):
graph.add_edge(center, value)
return 0.
class RAG(nx.Graph):
"""
The Region Adjacency Graph (RAG) of an image, subclasses
`networkx.Graph <http://networkx.github.io/documentation/latest/reference/classes.graph.html>`_
Parameters
----------
label_image : array of int
An initial segmentation, with each region labeled as a different
integer. Every unique value in ``label_image`` will correspond to
a node in the graph.
connectivity : int in {1, ..., ``label_image.ndim``}, optional
The connectivity between pixels in ``label_image``. For a 2D image,
a connectivity of 1 corresponds to immediate neighbors up, down,
left, and right, while a connectivity of 2 also includes diagonal
neighbors. See `scipy.ndimage.generate_binary_structure`.
data : networkx Graph specification, optional
Initial or additional edges to pass to the NetworkX Graph
constructor. See `networkx.Graph`. Valid edge specifications
include edge list (list of tuples), NumPy arrays, and SciPy
sparse matrices.
**attr : keyword arguments, optional
Additional attributes to add to the graph.
"""
def __init__(self, label_image=None, connectivity=1, data=None, **attr):
super(RAG, self).__init__(data, **attr)
if self.number_of_nodes() == 0:
self.max_id = 0
else:
self.max_id = max(self.nodes_iter())
if label_image is not None:
fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
# In the next ``ndi.generic_filter`` function, the kwarg
# ``output`` is used to provide a strided array with a single
# 64-bit floating point number, to which the function repeatedly
# writes. This is done because even if we don't care about the
# output, without this, a float array of the same shape as the
# input image will be created and that could be expensive in
# memory consumption.
ndi.generic_filter(
label_image,
function=_add_edge_filter,
footprint=fp,
mode='nearest',
output=as_strided(np.empty((1,), dtype=np.float_),
shape=label_image.shape,
strides=((0,) * label_image.ndim)),
extra_arguments=(self,))
def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True,
extra_arguments=[], extra_keywords={}):
"""Merge node `src` and `dst`.
The new combined node is adjacent to all the neighbors of `src`
and `dst`. `weight_func` is called to decide the weight of edges
incident on the new node.
Parameters
----------
src, dst : int
Nodes to be merged.
weight_func : callable, optional
Function to decide the attributes of edges incident on the new
node. For each neighbor `n` for `src` and `dst`, `weight_func` will
be called as follows: `weight_func(src, dst, n, *extra_arguments,
**extra_keywords)`. `src`, `dst` and `n` are IDs of vertices in the
RAG object which is in turn a subclass of `networkx.Graph`. It is
expected to return a dict of attributes of the resulting edge.
in_place : bool, optional
If set to `True`, the merged node has the id `dst`, else merged
node has a new id which is returned.
extra_arguments : sequence, optional
The sequence of extra positional arguments passed to
`weight_func`.
extra_keywords : dictionary, optional
The dict of keyword arguments passed to the `weight_func`.
Returns
-------
id : int
The id of the new node.
Notes
-----
If `in_place` is `False` the resulting node has a new id, rather than
`dst`.
"""
src_nbrs = set(self.neighbors(src))
dst_nbrs = set(self.neighbors(dst))
neighbors = (src_nbrs | dst_nbrs) - set([src, dst])
if in_place:
new = dst
else:
new = self.next_id()
self.add_node(new)
for neighbor in neighbors:
data = weight_func(self, src, new, neighbor, *extra_arguments,
**extra_keywords)
self.add_edge(neighbor, new, attr_dict=data)
self.node[new]['labels'] = (self.node[src]['labels'] +
self.node[dst]['labels'])
self.remove_node(src)
if not in_place:
self.remove_node(dst)
return new
def add_node(self, n, attr_dict=None, **attr):
"""Add node `n` while updating the maximum node id.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n, attr_dict, **attr)
self.max_id = max(n, self.max_id)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between `u` and `v` while updating max node id.
.. seealso:: :func:`networkx.Graph.add_edge`."""
super(RAG, self).add_edge(u, v, attr_dict, **attr)
self.max_id = max(u, v, self.max_id)
def copy(self):
"""Copy the graph with its max node id.
.. seealso:: :func:`networkx.Graph.copy`."""
g = super(RAG, self).copy()
g.max_id = self.max_id
return g
def next_id(self):
"""Returns the `id` for the new node to be inserted.
The current implementation returns one more than the maximum `id`.
Returns
-------
id : int
The `id` of the new node to be inserted.
"""
return self.max_id + 1
def _add_node_silent(self, n):
"""Add node `n` without updating the maximum node id.
This is a convenience method used internally.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n)
def rag_mean_color(image, labels, connectivity=2, mode='distance',
sigma=255.0):
"""Compute the Region Adjacency Graph using mean colors.
Given an image and its initial segmentation, this method constructs the
corresponding Region Adjacency Graph (RAG). Each node in the RAG
represents a set of pixels within `image` with the same label in `labels`.
The weight between two adjacent regions represents how similar or
dissimilar two regions are depending on the `mode` parameter.
Parameters
----------
image : ndarray, shape(M, N, [..., P,] 3)
Input image.
labels : ndarray, shape(M, N, [..., P,])
The labelled image. This should have one dimension less than
`image`. If `image` has dimensions `(M, N, 3)` `labels` should have
dimensions `(M, N)`.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
`scipy.ndimage.generate_binary_structure`.
mode : {'distance', 'similarity'}, optional
The strategy to assign edge weights.
'distance' : The weight between two adjacent regions is the
:math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean
colors of the two regions. It represents the Euclidean distance in
their average color.
'similarity' : The weight between two adjacent is
:math:`e^{-d^2/sigma}` where :math:`d=|c_1 - c_2|`, where
:math:`c_1` and :math:`c_2` are the mean colors of the two regions.
It represents how similar two regions are.
sigma : float, optional
Used for computation when `mode` is "similarity". It governs how
close to each other two colors should be, for their corresponding edge
weight to be significant. A very large value of `sigma` could make
any two colors behave as though they were similar.
Returns
-------
out : RAG
The region adjacency graph.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.astronaut()
>>> labels = segmentation.slic(img)
>>> rag = graph.rag_mean_color(img, labels)
References
----------
.. [1] Alain Tremeau and Philippe Colantoni
"Regions Adjacency Graph Applied To Color Image Segmentation"
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274
"""
graph = RAG(labels, connectivity=connectivity)
for n in graph:
graph.node[n].update({'labels': [n],
'pixel count': 0,
'total color': np.array([0, 0, 0],
dtype=np.double)})
for index in np.ndindex(labels.shape):
current = labels[index]
graph.node[current]['pixel count'] += 1
graph.node[current]['total color'] += image[index]
for n in graph:
graph.node[n]['mean color'] = (graph.node[n]['total color'] /
graph.node[n]['pixel count'])
for x, y, d in graph.edges_iter(data=True):
diff = graph.node[x]['mean color'] - graph.node[y]['mean color']
diff = np.linalg.norm(diff)
if mode == 'similarity':
d['weight'] = math.e ** (-(diff ** 2) / sigma)
elif mode == 'distance':
d['weight'] = diff
else:
raise ValueError("The mode '%s' is not recognised" % mode)
return graph
def rag_boundary(labels, edge_map, connectivity=2):
""" Comouter RAG based on region boundaries
Given an image's initial segmentation and its edge map this method
constructs the corresponding Region Adjacency Graph (RAG). Each node in the
RAG represents a set of pixels within the image with the same label in
`labels`. The weight between two adjacent regions is the average value
in `edge_map` along their boundary.
Parameters
----------
labels : ndarray
The labelled image.
edge_map : ndarray
This should have the same shape as that of `labels`. For all pixels
along the boundary between 2 adjacent regions, the average value of the
corresponding pixels in `edge_map` is the edge weight between them.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
`scipy.ndimage.filters.generate_binary_structure`.
Examples
--------
>>> from skimage import data, segmentation, filters, color
>>> from skimage.future import graph
>>> img = data.chelsea()
>>> labels = segmentation.slic(img)
>>> edge_map = filters.sobel(color.rgb2gray(img))
>>> rag = graph.rag_boundary(labels, edge_map)
"""
conn = ndi.generate_binary_structure(labels.ndim, connectivity)
eroded = ndi.grey_erosion(labels, footprint=conn)
dilated = ndi.grey_dilation(labels, footprint=conn)
boundaries0 = (eroded != labels)
boundaries1 = (dilated != labels)
labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
n = np.max(labels_large) + 1
# use a dummy broadcast array as data for RAG
ones = as_strided(np.ones((1,), dtype=np.float), shape=labels_small.shape,
strides=(0,))
count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
dtype=np.int_, shape=(n, n)).tocsr()
data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))
data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
graph_matrix = data_coo.tocsr()
graph_matrix.data /= count_matrix.data
rag = RAG()
rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
weight='weight')
rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
weight='count')
for n in rag.nodes():
rag.node[n].update({'labels': [n]})
return rag
def show_rag(labels, rag, img, border_color='black', edge_width=1.5,
edge_cmap='magma', img_cmap='bone', in_place=True, ax=None):
"""Draw a Region Adjacency Graph on an image.
Given a labelled image and its corresponding RAG, draw the nodes and edges
of the RAG on the image with the specified colors. Edges are drawn between
the centroid of the 2 adjacent regions in the image.
Parameters
----------
labels : ndarray, shape (M, N)
The labelled image.
rag : RAG
The Region Adjacency Graph.
img : ndarray, shape (M, N[, 3])
Input image. If `colormap` is `None`, the image should be in RGB
format.
border_color : color spec, optional
Color with which the borders between regions are drawn.
edge_width : float, optional
The thickness with which the RAG edges are drawn.
edge_cmap : :py:class:`matplotlib.colors.Colormap`, optional
Any matplotlib colormap with which the edges are drawn.
img_cmap : :py:class:`matplotlib.colors.Colormap`, optional
Any matplotlib colormap with which the image is drawn. If set to `None`
the image is drawn as it is.
in_place : bool, optional
If set, the RAG is modified in place. For each node `n` the function
will set a new attribute ``rag.node[n]['centroid']``.
ax : :py:class:`matplotlib.axes.Axes`, optional
The axes to draw on. If not specified, new axes are created and drawn
on.
Returns
-------
lc : :py:class:`matplotlib.collections.LineCollection`
A collection of lines that represent the edges of the graph. It can be
passed to the :meth:`matplotlib.figure.Figure.colorbar` function.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.coffee()
>>> labels = segmentation.slic(img)
>>> g = graph.rag_mean_color(img, labels)
>>> lc = graph.show_rag(labels, g, img)
>>> cbar = plt.colorbar(lc)
"""
if not in_place:
rag = rag.copy()
if ax is None:
fig, ax = plt.subplots()
out = util.img_as_float(img, force_copy=True)
if img_cmap is None:
if img.ndim < 3 or img.shape[2] not in [3, 4]:
msg = 'If colormap is `None`, an RGB or RGBA image should be given'
raise ValueError(msg)
# Ignore the alpha channel
out = img[:, :, :3]
else:
img_cmap = cm.get_cmap(img_cmap)
out = color.rgb2gray(img)
# Ignore the alpha channel
out = img_cmap(out)[:, :, :3]
edge_cmap = cm.get_cmap(edge_cmap)
# Handling the case where one node has multiple labels
# offset is 1 so that regionprops does not ignore 0
offset = 1
map_array = np.arange(labels.max() + 1)
for n, d in rag.nodes_iter(data=True):
for label in d['labels']:
map_array[label] = offset
offset += 1
rag_labels = map_array[labels]
regions = measure.regionprops(rag_labels)
for (n, data), region in zip(rag.nodes_iter(data=True), regions):
data['centroid'] = tuple(map(int, region['centroid']))
cc = colors.ColorConverter()
if border_color is not None:
border_color = cc.to_rgb(border_color)
out = segmentation.mark_boundaries(out, rag_labels, color=border_color)
ax.imshow(out)
# Defining the end points of the edges
# The tuple[::-1] syntax reverses a tuple as matplotlib uses (x,y)
# convention while skimage uses (row, column)
lines = [[rag.node[n1]['centroid'][::-1], rag.node[n2]['centroid'][::-1]]
for (n1, n2) in rag.edges_iter()]
lc = LineCollection(lines, linewidths=edge_width, cmap=edge_cmap)
edge_weights = [d['weight'] for x, y, d in rag.edges_iter(data=True)]
lc.set_array(np.array(edge_weights))
ax.add_collection(lc)
return lc
|
bsd-3-clause
|
fengz10/ICN_BM
|
plotResults/AvgPathLength.py
|
1
|
1444
|
from numpy import array
from numpy import arange
import matplotlib.pyplot as plt
params = {'legend.fontsize': 20,
'legend.linewidth': 2}
plt.rcParams.update(params)
avgPathLenghShortest = (3, 2.1562, 1.5496, 1.1386, 0.855, 0.6512, 0.4896, 0.3158, 0.22, 0.109, 0)
avgPathNoValley = (5, 3.3362, 2.1664, 1.4502, 0.9786, 0.7064, 0.5256, 0.3178, 0.2216, 0.109, 0)
# Max hops are always 3, and 5 respectively
# When cacheRatio > 0.6, Max hops sometimes are 2 hops
# When cacheRatio > 0.9, Max hops always are 1 hop
# When cacheRatio = 1,0, Max hops all 0.
x1 = arange(0.0, 1.1, 0.1)
plt.plot(x1, avgPathLenghShortest, 'bD-', linewidth=2.5, markersize=15, label='SCM model')
plt.plot(x1, avgPathNoValley, 'k^--', linewidth=3.0, markersize=15, label='GR model')
### Set the tick values
#0, 0.1, 0.2, .. 1.0
plt.xticks(arange(0.0, 1.1, 0.2), ('0.0', '0.2', '0.4', '0.6', '0.8', '1.0'), fontsize=20)
#axes.set_xlim(0.0, 1.1)
plt.yticks([0, 1, 2, 3, 4, 5], (' ', '1', '2', '3', '4', '5'), fontsize=20)
plt.xlabel('Replication size ratio (%)', fontsize=22)
plt.ylabel('AS hops', fontsize=22)
plt.legend(loc='upper right', numpoints = 1) #upper left
#plt.xlabel('AS number', fontsize=20)
plt.ylabel('AS hops', fontsize=20)
plt.legend(loc='upper right', numpoints = 1) #upper left
# Leave margins around the figure; without this adjustment the x-axis label gets cut off
plt.subplots_adjust(left = 0.08, bottom=0.12)
plt.show()
|
gpl-2.0
|
sanja7s/SR_Twitter
|
src_CAPITAL/Distributions_of_Capital.py
|
1
|
7031
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
analyze assortativity of the graphs in terms of sentiment
'''
from igraph import *
import networkx as nx
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
import matplotlib.cm as cm
from collections import defaultdict
import matplotlib
import pandas as pd
import seaborn as sns
sns.set(color_codes=True, font_scale=2)
sns.set_style('whitegrid')
import pandas as pd
from scipy import stats, integrate
f_in_user_labels = "usr_num_CVs.tab"
##################
f_in_user_taxons = "user_taxons.tab"
f_in_user_concepts = "user_concepts.tab"
f_in_user_entities = "user_entities.tab"
f_in_num_tweets = "usr_num_tweets.tab"
#########################
#
f_in_user_sentiment = "user_sentiment.tab"
# mention graph
#########################
f_in_graph = "threshold_mention_graphs/directed_threshold0.tab"
f_in_graph_weights = "threshold_mention_graphs/mention_graph_weights.dat"
f_out_sent_mention_graph = "directed_threshold0_sent_val.tab"
IN_DIR = "../../../DATA/CAPITAL/"
f_out_mention = "sentiment_assortativity_mention_2.txt"
#########################
f_in_graph_weights = "mention_graph_weights.dat"
os.chdir(IN_DIR)
def social_capital_distributions(f_name, tname):
f = open(f_name, "r")
cap = []
cnt = 0
for line in f:
if tname == 'sentiment':
(vid, vn, val) = line.split('\t')
val = float(val)
elif tname == 'status inconsistency':
(vid, val) = line.split('\t')
val = float(val)
else:
(vid, val) = line.split('\t')
val = int(val)
cap.append(val)
cnt += 1
print cnt
#plot_cap_distr_CVs(cap, tname)
plot_cap_distr_entities(cap, 'entities')
#plot_cap_distr_CVs(cap, 'CVs')
def social_capital_distributions_1(f_name, tname):
f = open(f_name, "r")
cap = []
cnt = 0
for line in f:
(vid, val) = line.split('\t')
val = int(val)
cap.append(val)
cnt += 1
print cnt
plot_cap_distr_1(cap, tname)
def plot_cap_distr_1(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
num_bins = 100
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins)
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x = np.ravel(zip(bins[:-1], bins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
plt.plot(x,y,color='darkorchid',label=lab)
plt.xlabel('# '+tname)
plt.ylabel('p(# ' +tname+ ' )')
plt.yscale('log')
#plt.xscale('log')
plt.legend()
plt.savefig(tname + '1.eps')
def plot_cap_distr(x, tname):
x = np.array(x)
mu = np.mean(x)
sigma = np.std(x)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
num_bins = 100
# the histogram of the data
n, bins, patches = plt.hist(x, normed=1, bins=num_bins, histtype='step',color='darkorchid')
plt.clf() # Get rid of this histogram since not the one we want.
nx_frac = n/float(len(n)) # Each bin divided by total number of objects.
width = bins[1] - bins[0] # Width of each bin.
x = np.ravel(zip(bins[:-1], bins[:-1]+width))
y = np.ravel(zip(nx_frac,nx_frac))
plt.title(lab)
plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('' + tname)
plt.ylabel('p('+ tname +')')
plt.tight_layout()
#plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
plt.xlim(-1,1)
#plt.ylim(-0.005,0.015)
#plt.grid(True)
plt.savefig(tname + '_v2.eps')
def create_distr_sent(x):
d = stats.gaussian_kde(x)
print d
return d
def plot_cap_distr_7s(z, tname):
z = np.array(z, dtype=np.float)
mu = np.mean(z)
sigma = np.std(z)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
sns.distplot(z, hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, color="r")
plt.title(lab)
plt.xlabel('' + tname)
plt.ylabel('kde('+ tname +')')
plt.xlim(-1,1)
plt.tight_layout()
plt.savefig(tname + '_v7.eps')
"""
kde1 = create_distr_sent(z)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(z, np.zeros(z.shape), 'b+', ms=20) # rug plot
x_eval = np.linspace(-1, 1, num=2000)
ax.plot(x_eval, kde1(x_eval), 'k-', label="Scott's Rule")
"""
#plt.show()
"""
x = d.keys()
y = d.values()
plt.scatter(x,y,color='darkorchid',label=lab)
plt.xlabel('' + tname)
plt.ylabel('p('+ tname +')')
plt.tight_layout()
#plt.yscale('log')
#plt.xscale('log')
#xint = range(int(min(x)), int(math.ceil(max(x))+1))
#plt.xticks(xint)
plt.xlim(-1,1)
#plt.ylim(-0.005,0.015)
#plt.grid(True)
"""
def plot_cap_distr_entities(z, tname):
z = np.array(z, dtype=np.float)
mu = np.mean(z)
sigma = np.std(z)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(6,6)
fig7s.set_size_inches((6,6))
plt.figure(figsize=(6, 6))
sns.distplot(z, bins=30, hist=0, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r")
plt.title(lab)
plt.xlabel('entity diversity')
plt.ylabel('kde')
plt.xlim(-1,31)
plt.tight_layout()
plt.savefig(tname + '_v7.eps')
def plot_cap_distr_CVs(z, tname):
z = np.array(z, dtype=np.float)
mu = np.mean(z)
sigma = np.std(z)
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(6,6)
fig7s.set_size_inches((6,6))
plt.figure(figsize=(6, 6))
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
sns.distplot(z, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r", hist=0)
plt.title(lab)
plt.xlabel('CV concept diversity')
plt.ylabel('kde')
plt.xlim(0,2000)
plt.tight_layout()
plt.savefig(tname + '_v77.eps')
#social_capital_distributions('entities', 'entities')
#social_capital_distributions('node_scalar_inconsistency_v2', 'status inconsistency')
def plot_cap_distr_BI(z, tname):
z = np.array(z, dtype=np.float)
mu = np.mean(z)
sigma = np.std(z)
lab = '$\mu=' + "{:.3f}".format(mu) \
+ '$, $\sigma= ' + "{:.3f}".format(sigma) + '$'
fig7s = plt.gcf()
plt.rcParams['figure.figsize']=(6,6)
fig7s.set_size_inches((6,6))
plt.figure(figsize=(6, 6))
sns.distplot(z, bins=30, hist=0, \
#hist_kws={"histtype": "step", "linewidth": 1, "alpha": 0.3, "color": "g"}, \
color="r")
plt.title(lab)
plt.xlabel('Burt\'s index')
plt.ylabel('kde')
plt.xlim(-0.1,max(z)+0.1)
plt.tight_layout()
plt.savefig(tname + '_v7s.eps')
def read_BI():
return pd.read_csv('BI_indexR_full.txt',\
encoding='utf-8', delim_whitespace=1)
def BI_capital_distribution():
bi = read_BI()
print max(bi['bi']), min(bi['bi'])
bidict = bi.set_index('id')['bi'].to_dict()
cnt = 0
for el in bidict:
if bidict[el] > 1:
bidict[el] = 1
cnt += 1
print cnt
plot_cap_distr_BI(bidict.values(), 'Burt\'s index')
BI_capital_distribution()
|
mit
|
CivicKnowledge/metatab-packages
|
broken/cdss.ca.gov-residential_care_facilities/scripts/geocode.py
|
1
|
8750
|
#!/usr/bin/env python
#
# Geocode the facilities records, using old geocode records when they exist.
import requests
from six import StringIO
from os import environ
import csv
import sys
import json
import metatab as mt
from itertools import islice
from geoid.census import Tract as CensusTract
from geoid.acs import Tract as AcsTract
from requests.exceptions import Timeout
from rowgenerators import SourceError
from metatab import MetatabError
doc = mt.MetatabDoc(environ['METATAB_DOC'])
## Need to have the facility number -> zip seperately.
fac_zip = {}
for row in doc.resource('facilities').iterdict:
fac_zip[row['facility_number']] = row['facility_zip']
##
## Load old geocodes, to save time and remote resources
##
old_url='http://s3.amazonaws.com/library.metatab.org/{}.csv'.format(doc.as_version('-1').find_first_value('Root.Name'))
old_geo = {}
try:
for row in mt.open_package(old_url).resource('geocodes').iterdict:
try:
ui = int(row['unique_id'])
row['unique_id'] = ui
old_geo[ui] = row
except ValueError:
# Erroneous rows have 'unique_id' == 'Unique'
pass
except (SourceError, MetatabError) as e:
print("Failed to load old geocodes", e, file=sys.stderr)
geocoder_header = 'unique_id input_address match quality match_address latlon tiger_id side_of_street state_fips county_fips tract_fips block_fips'.split()
out_header = 'unique_id input_address match quality match_address lat lon tiger_id side_of_street state_fips county_fips tract_fips block_fips tract_geoid'.split()
w = csv.DictWriter(sys.stdout, out_header)
w.writeheader()
def make_zip_map():
"""Create a map from zip to track that uses the HUD zip-tract cross walk as a probablilty
map, with the facility it used as the probability. Using the facility ID makes the mapping stable. """
zip_xwalk_doc = mt.open_package('http://library.metatab.org/huduser.gov-zip_tract-2016-2.csv')
zip_xwalk = zip_xwalk_doc.resource('zip-tract')
zip_xwalk_df = zip_xwalk.dataframe()
zx_groups = zip_xwalk_df.sort_values('res_ratio').groupby('zip')
def make_single_zip_map_f(groups, zip):
"""Function to create a closure for mapping for a single zip, from an id value to
tract"""
import numpy as np
import pandas as pd
# Use the residential ratios, the portion of the homes in the zip that are in each tract.
res_ratios = list(zx_groups.get_group(zip).cumsum().res_ratio)
tracts = list(zx_groups.get_group(zip).tract)
assert len(res_ratios) == len(tracts)
def _f(id):
# Use the end of the ID value to ensure repeatability
n = float(id%100) / 100.0
index = np.argmax(pd.Series(res_ratios) > n)
return tracts[index]
return _f
f_map = {}
# dict that returns, for each zip, the function to get a tract for the id number.
for zp in zx_groups.groups.keys():
f_map[zp] = make_single_zip_map_f(zx_groups, zp)
# Finally, put it all together in a single closure.
def lookup(zip, n):
try:
# The map will return a Census geoid, which has 11 characters, but it is often missing
# the leading 0, so we have to put it back. Then it must be converted to an
# ACS Tract
census_tract_str = str(f_map[int(zip)](int(n)%100 / 100.0)).zfill(11)
return str(AcsTract.parse(census_tract_str))
except KeyError:
return None
return lookup
zip_to_tract = make_zip_map()
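# Minimal usage sketch (the ZIP and facility number are made up for illustration):
# zip_to_tract('92345', 366426709) returns an ACS tract geoid string (the code below
# expects 18 characters: '14000US' plus the 11-digit state/county/tract FIPS), or None
# when the ZIP is not in the HUD crosswalk; the facility number only seeds the
# deterministic choice among the ZIP's tracts.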
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
def make_request(rows):
if len(rows) == 0:
return
header = "Unique ID, Street address, City, State, ZIP".split(',')
strio = StringIO()
sw = csv.writer(strio)
#sw.writerow(header)
sw.writerows(rows)
text = strio.getvalue()
tries = 3
yielded = set()
url = 'https://geocoding.geo.census.gov/geocoder/geographies/addressbatch'
payload = {
'benchmark':'Public_AR_Current',
'vintage':'ACS2013_Current',
'returntype': 'geographies'
}
files = {'addressFile': ('report.csv', text) }
while tries:
try:
r = requests.post(url, files=files, data = payload, timeout= 2*60)
r.raise_for_status()
for i, row in enumerate(csv.reader(StringIO(r.text))):
if not tuple(row) in yielded:
yield row
yielded.add(tuple(row))
break
except Timeout as e:
tries -= 1
print("TIMEOUT!", e, file=sys.stderr)
except Exception as e:
tries -= 1
print("ERROR", e, file=sys.stderr)
def mkdict(row):
"""Turn a geocoder response row into a well-formed dict"""
d = dict(zip(geocoder_header, row))
if len(row) > 3:
try:
try:
d['lat'], d['lon'] = d['latlon'].split(',')
except ValueError as e:
d['lat'], d['lon'] = '',''
d['tract_geoid'] = str(Tract( int(d['state_fips']), int(d['county_fips']), int(d['tract_fips']) ))
try:
del d['latlon']
except Exception as e:
pass
except Exception as e:
# These appear to be errors associated with quote characters in the addresses, like
# 366426709,"8430 I AVENUE""", HESPERIA, CA," 92345""",No_Match. There aren't many of
# them, but they are a problem
print("ERROR for ", row, e, file=sys.stderr)
d['input_address'] = ''
d['match'] = 'Parse Error'
return d
# Chunk the facilities CSV file into smaller blocks. The Census geocoder will only
# accept 1000 lines at a time, but we'll send smaller files.
def chunked_geocode(doc):
# Header for the file submitted to the geocoder
row_n = 0
request_rows = []
for row in doc.resource('facilities').iterdict:
# If the row is in the old geocodes, yield it immediately
try:
if int(row['facility_number']) in old_geo:
old_row = old_geo[int(row['facility_number'])]
yield row_n, False, old_row
row_n += 1
continue
except ValueError:
pass
request_rows.append([row['facility_number'],
row['facility_address'],
row['facility_city'],
row['facility_state'],
row['facility_zip']])
if len(request_rows) > 250:
for row in make_request(request_rows):
# row colums are:
# unique_id input_address match quality match_address latlon tiger_id side_of_street state_fips county_fips tract_fips block_fips
yield row_n, True, mkdict(row)
row_n += 1
request_rows = [];
for row in make_request(request_rows):
# row colums are:
# unique_id input_address match quality match_address latlon tiger_id side_of_street state_fips county_fips tract_fips block_fips
yield row_n, True, mkdict(row)
row_n += 1
from geoid.census import Tract
for row_n, was_geocoded, row in chunked_geocode(doc):
if not row.get('tract_geoid'):
row['tract_geoid'] = zip_to_tract(fac_zip[int(row['unique_id'])], int(row['unique_id']))
row['side_of_street'] = None
row['tiger_id'] = None
if row['tract_geoid']:
if len(row['tract_geoid']) != 18:
# It's probably still a Census Tract, so convert it to an Acs tract.
row['tract_geoid'] = str(CensusTract.parse(row['tract_geoid'].zfill(11)).convert(AcsTract))
assert(len(row['tract_geoid'])) == 18, row['tract_geoid']
t = AcsTract.parse(row['tract_geoid'])
#print(str(t), file=sys.stderr)
row['state_fips'] = t.state
row['county_fips'] = t.county
row['tract_fips'] = t.tract
if row.get('state_fips'):
row['state_fips'] = str(row['state_fips']).zfill(2)
if row.get('county_fips'):
row['county_fips'] = str(row.get('county_fips')).zfill(3)
try:
w.writerow(row)
except Exception as e:
print("ERROR ROW: ", row, e, file=sys.stderr)
|
mit
|
annahs/atmos_research
|
AL_distrs.py
|
1
|
2384
|
import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import calendar
from datetime import datetime
import mysql.connector
import math
SP2_ID = 58
start = datetime(2013,10,10)
end = datetime(2013,10,15)
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = calendar.timegm(end.utctimetuple())
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
bins={}
for i in range(70,310,10):
bins[i] = []
cursor.execute('''(SELECT
BB_incand_HG,
NB_incand_HG,
BB_incand_LG
FROM alert_mass_number_data_2013
WHERE
UNIX_UTC_ts_int_start >= %s
AND UNIX_UTC_ts_int_end < %s)''',
(UNIX_start,UNIX_end))
data = cursor.fetchall()
for row in data:
bbhg_incand_pk_amp = row[0]
nbhg_incand_pk_amp = row[1]
bblg_incand_pk_amp = row[2]
#calculate masses and uncertainties
#HG
bbhg_mass_uncorr = 0.29069 + 1.49267E-4*bbhg_incand_pk_amp + 5.02184E-10*bbhg_incand_pk_amp*bbhg_incand_pk_amp
bbhg_mass_corr = bbhg_mass_uncorr/0.7 #AD correction factor is 0.7 +- 0.05
#LG
bblg_mass_uncorr = -0.15884 + 0.00176*bblg_incand_pk_amp + 3.19118E-8*bblg_incand_pk_amp*bblg_incand_pk_amp
bblg_mass_corr = bblg_mass_uncorr/0.7 #AD correction factor is 0.7 +- 0.05
if 0.33 <= bbhg_mass_corr < 1.8:
mass = bbhg_mass_corr
if 12.8 <= bblg_mass_corr < 41:
mass = bblg_mass_corr
if (1.8 <= bbhg_mass_corr < 12.8) or (1.8 <= bblg_mass_corr < 12.8):
mass = (bbhg_mass_corr + bblg_mass_corr)/2
VED = (((mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
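# The line above is the volume-equivalent diameter of a sphere of density 1.8 g/cm^3:
# VED = (6*m / (pi*rho))**(1/3), with the mass converted fg -> g (1e15 fg/g) and the
# resulting diameter converted cm -> nm (1e7 nm/cm).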
for bin in bins:
if bin <= VED < (bin+10):
bins[bin].append([mass,(bbhg_incand_pk_amp/nbhg_incand_pk_amp)])
distr_data = []
for bin in bins:
size = bin +5
mass = sum([row[0] for row in bins[bin]])/(math.log(size+10)-math.log(size))
number = len([row[0] for row in bins[bin]])/(math.log(size+10)-math.log(size))
distr_data.append([size,mass,number])
distr_data.sort()
size = [row[0] for row in distr_data]
mass = [row[1] for row in distr_data]
number = [row[2] for row in distr_data]
#plotting
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(size,mass,marker='o', label='dM/dlog(VED)')
ax1.semilogx(size,number,marker='o', label='dN/dlog(VED)')
plt.xlabel('VED (nm)')
plt.ylabel('dM/dlog(VED)')
plt.legend()
plt.show()
|
mit
|
chenyyx/scikit-learn-doc-zh
|
examples/zh/calibration/plot_compare_calibration.py
|
29
|
5013
|
"""
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic
sigmoid shape, indicating that the classifier could trust its "intuition"
more and return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
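# A quick numerical illustration of "well calibrated" (toy values, not part of the
# original example): if a classifier assigns predict_proba ~= 0.8 to five samples of
# which four are positive, calibration_curve recovers that agreement, e.g.
#   calibration_curve([1, 1, 1, 1, 0], [0.8, 0.8, 0.8, 0.8, 0.8], n_bins=1)
#   -> (array([0.8]), array([0.8]))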
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
# #############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
|
gpl-3.0
|
wwjiang007/flink
|
flink-python/pyflink/fn_execution/beam/beam_operations.py
|
5
|
8672
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from apache_beam.runners.worker import bundle_processor, operation_specs
from pyflink.fn_execution import flink_fn_execution_pb2
from pyflink.fn_execution.coders import from_proto, from_type_info_proto, TimeWindowCoder, \
CountWindowCoder, FlattenRowCoder
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
import pyflink.fn_execution.datastream.operations as datastream_operations
import pyflink.fn_execution.table.operations as table_operations
try:
import pyflink.fn_execution.beam.beam_operations_fast as beam_operations
except ImportError:
import pyflink.fn_execution.beam.beam_operations_slow as beam_operations
# ----------------- UDF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.SCALAR_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_scalar_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.ScalarFunctionOperation)
# ----------------- UDTF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.TABLE_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_table_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.TableFunctionOperation)
# ----------------- UDAF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupAggregateOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_TABLE_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_table_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupTableAggregateOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_WINDOW_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_group_window_aggregate_function(factory, transform_id, transform_proto, parameter,
consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupWindowAggregateOperation)
# ----------------- Pandas UDAF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.PANDAS_AGGREGATE_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_pandas_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.PandasAggregateFunctionOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.PANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN,
flink_fn_execution_pb2.UserDefinedFunctions)
def create_pandas_over_window_aggregate_function(
factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.PandasBatchOverWindowAggregateFunctionOperation)
@bundle_processor.BeamTransformFactory.register_urn(
datastream_operations.DATA_STREAM_STATELESS_FUNCTION_URN,
flink_fn_execution_pb2.UserDefinedDataStreamFunction)
def create_data_stream_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
datastream_operations.StatelessOperation)
@bundle_processor.BeamTransformFactory.register_urn(
datastream_operations.DATA_STREAM_STATEFUL_FUNCTION_URN,
flink_fn_execution_pb2.UserDefinedDataStreamFunction)
def create_data_stream_keyed_process_function(factory, transform_id, transform_proto, parameter,
consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
datastream_operations.StatefulOperation)
def _create_user_defined_function_operation(factory, transform_proto, consumers, udfs_proto,
beam_operation_cls, internal_operation_cls):
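"""Wrap the given Flink Python functions (udfs_proto) in a Beam operation. When the
serialized functions carry key information (keyed table aggregates or stateful
DataStream operations), a RemoteKeyedStateBackend is built from the key coder (plus a
window coder for group windows); otherwise the operation is created without a state
backend."""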
output_tags = list(transform_proto.outputs.keys())
output_coders = factory.get_output_coders(transform_proto)
spec = operation_specs.WorkerDoFn(
serialized_fn=udfs_proto,
output_tags=output_tags,
input=None,
side_inputs=None,
output_coders=[output_coders[tag] for tag in output_tags])
if hasattr(spec.serialized_fn, "key_type"):
# keyed operation, need to create the KeyedStateBackend.
row_schema = spec.serialized_fn.key_type.row_schema
key_row_coder = FlattenRowCoder([from_proto(f.type) for f in row_schema.fields])
if spec.serialized_fn.HasField('group_window'):
if spec.serialized_fn.group_window.is_time_window:
window_coder = TimeWindowCoder()
else:
window_coder = CountWindowCoder()
else:
window_coder = None
keyed_state_backend = RemoteKeyedStateBackend(
factory.state_handler,
key_row_coder,
window_coder,
spec.serialized_fn.state_cache_size,
spec.serialized_fn.map_state_read_cache_size,
spec.serialized_fn.map_state_write_cache_size)
return beam_operation_cls(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls,
keyed_state_backend)
elif internal_operation_cls == datastream_operations.StatefulOperation:
key_row_coder = from_type_info_proto(spec.serialized_fn.key_type_info)
keyed_state_backend = RemoteKeyedStateBackend(
factory.state_handler,
key_row_coder,
None,
1000,
1000,
1000)
return beam_operation_cls(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls,
keyed_state_backend)
else:
return beam_operation_cls(
transform_proto.unique_name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls)
|
apache-2.0
|
ch3ll0v3k/scikit-learn
|
sklearn/preprocessing/tests/test_imputation.py
|
213
|
11911
|
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
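# Illustrative call (toy arrays): for X = [[1, 0], [0, 2]] with missing_values=0,
# strategy="mean" and axis=0, the learned statistics are the column means of the
# observed entries ([1, 2]) and the imputed matrix is [[1, 2], [1, 2]].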
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
|
bsd-3-clause
|
JackKelly/neuralnilm_prototype
|
scripts/e247.py
|
2
|
7228
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
241
output is prediction
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
error = T.nnet.binary_crossentropy(x, t)
def mask_and_mean_error(mask):
masked_error = error[mask.nonzero()]
mean = masked_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
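# scaled_cost above averages the binary cross-entropy separately over targets above and
# below THRESHOLD and then takes the mean of the two, so the rarer "on" samples and the
# common "off" samples contribute equally to the loss; ifelse guards against an empty
# mask producing a NaN mean.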
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
# subsample_target=5,
include_diff=False,
clip_appliance_power=True
# target_is_prediction=True
#lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Uniform(25)
}
]
)
def exp_a(name):
global source
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy['loss_function'] = scaled_cost
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_b(name):
# As A but with cross entropy
# source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_c(name):
# source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy['loss_function'] = scaled_cost
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': LSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Uniform(10)
}
)
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_d(name):
# As A but with cross entropy
# source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': LSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS,
'peepholes': False,
'W_in_to_cell': Uniform(10)
}
)
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_e(name):
# learning rate 0.1
# source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy['loss_function'] = scaled_cost
net_dict_copy['updates'] = partial(nesterov_momentum, learning_rate=0.1)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_f(name):
# As A but with cross entropy and learning rate 0.1
# source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['updates'] = partial(nesterov_momentum, learning_rate=0.1)
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('cdef'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=10000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
|
mit
|
PeterWangIntel/chromium-crosswalk
|
chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py
|
39
|
11336
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
NACL_DIR = os.path.join(CHROMIUM_DIR, 'native_client')
sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
sys.path.append(NACL_DIR)
import detect_host_arch
import pynacl.platform
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], NACL_DIR, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, NACL_DIR, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
# Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
os_name = pynacl.platform.GetOS()
arch_name = pynacl.platform.GetArch()
toolchain_dir = os.path.join(NACL_DIR, 'toolchain',
'%s_%s' % (os_name, arch_name))
nacl_newlib_dir = os.path.join(toolchain_dir, 'nacl_%s_newlib' % arch_name)
nacl_glibc_dir = os.path.join(toolchain_dir, 'nacl_%s_glibc' % arch_name)
pnacl_newlib_dir = os.path.join(toolchain_dir, 'pnacl_newlib')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(CHROMIUM_DIR, [options.mode])
if chrome_filename is None:
raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in detect_host_arch.HostArch():
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(NACL_DIR, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
'nacl_newlib_dir=%s' % nacl_newlib_dir,
'nacl_glibc_dir=%s' % nacl_glibc_dir,
'pnacl_newlib_dir=%s' % pnacl_newlib_dir,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
|
bsd-3-clause
|
boland1992/SeisSuite
|
seissuite/spectrum/Sfind_extrema.py
|
8
|
14439
|
# -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from descartes.patch import PolygonPatch
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
# In this case, we'll assume the shapefile only has one layer
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
if shape is None:
self.polygon = self.shape_poly()
return asPolygon(self.polygon.buffer(size, resolution=res)\
.exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
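# A minimal usage sketch of the InShape class above (path and coordinates are hypothetical):
#   shape = InShape('some_boundary.shp')
#   poly = shape.shape_poly()              # shapely polygon of the boundary
#   shape.point_check((144.96, -37.81))    # returns the coord if inside, else None
# The same pattern is used further below with the Australian shapefile.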
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
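"""Return a (255, 2) array of (frequency, sqrt(power spectral density)) computed with a
Welch flat-top estimate from a single obspy trace, or 0. when fewer than 256 frequency
bins are available."""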
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
nseg = min(wave.shape[-1],1024)
#plt.semilogy(f, np.sqrt(Pxx_spec))
f, Pxx_spec = signal.welch(wave, fs, 'flattop',
nperseg=nseg,
scaling='spectrum')
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
except Exception as e:
# file names that do not match the expected pattern simply sort last
pass
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
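# Example (matching the call further below): paths('/storage/ANT/INPUT/DATA/AU-2014/', 'mseed')
# walks the folder recursively and returns every *.mseed file, sorted by the date embedded
# in each file name via paths_sort.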
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014/2014-01'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014/'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
#fig1 = plt.figure(figsize=(15,10))
#ax1 = fig1.add_subplot(111)
#ax1.set_title("Seismic Waveform Power Density Spectrum\n{}".format('S | 2014'))
#ax1.set_xlabel('Frequency (Hz)')
#ax1.set_ylabel('Power Density Spectrum (V RMS)')
#ax1.set_xlim([0,4])
#ax1.grid(True, axis='both', color='gray')
#ax1.set_autoscaley_on(True)
#ax1.set_yscale('log')
# initialise dictionary to hold all maxima information for a given station
# this will be used to return a new dictionary of the average maxima
# for each station over the course of a year.
maxima_dict0 = {}
maxima_dict1 = {}
for s in paths_list:
print maxima_dict0
try:
split_path = s.split('/')
stat_info = split_path[-1][:-6]
net = stat_info.split('.')[0]
stat = stat_info.split('.')[1]
net_stat = '{}.{}'.format(net,stat)
year = split_path[-2].split('-')[0]
st = read(s)
if net == 'AU':
# set up loop for all traces within each imported stream.
t0 = datetime.datetime.now()
pool = mp.Pool()
spectra = pool.map(spectrum, st[:])
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "time taken to calculate monthly spectra: ", t1-t0
# Calculate weighted average spectrum for this station for this month
spectra = np.asarray(spectra)
search = np.where(spectra==0.)
print search
spectra = np.delete(spectra, search)
spectra = np.average(spectra, axis=0)
#quit()
X, Y = spectra[:,0], spectra[:,1]
extrema_indices = argrelextrema(Y, np.greater)[0]
#print "extrema_indices: ", extrema_indices
maxima_X = X[extrema_indices]
maxima_Y = Y[extrema_indices]
local_extrema = np.column_stack((maxima_X, maxima_Y))
#print "local_extrema: ", local_extrema
# sort local maxima
local_extrema = local_extrema[local_extrema[:, 1].argsort()]
local_extrema = local_extrema[::-1]
# retrieve the top two maxima from the PDS plot for use on
# noise map.
max0, max1 = local_extrema[0], local_extrema[1]
maxes = [max0,max1]
#print "max0, max1: ", max0, max1
if not net_stat in maxima_dict0.keys():
maxima_dict0[net_stat] = []
if net_stat in maxima_dict0.keys():
#if not len(maxima_dict[stat]) >= 1:
maxima_dict0[net_stat].append(max0)
if net_stat not in maxima_dict1.keys():
maxima_dict1[net_stat] = []
if net_stat in maxima_dict1.keys():
maxima_dict1[net_stat].append(max1)
#smooth_Y = np.convolve(X,Y)
#smooth_X = np.linspace(np.min(X), np.max(X),len(smooth_Y))
#plt.plot(smooth_X, smooth_Y, c='b', alpha=0.8)
#plt.figure()
#plt.plot(X, Y, c='k', alpha=0.5)
#plt.scatter(maxima_X, maxima_Y, c='r', s=30)
#plt.show()
#plt.clf()
except Exception as error:
print error
#plt.figure()
#stack and find average values for all of the above for each station
#for key in maxima_dict0.keys():
# stat_locs[key]
# maxima_dict0[key] = np.asarray(maxima_dict0[key])
# plt.scatter(maxima_dict0[key][:,0],maxima_dict0[key][:,1], c='b', s=10)
# maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
# plt.scatter(maxima_dict0[key][0],maxima_dict0[key][1], c='r', s=30)
# print maxima_dict0[key]
#for key in maxima_dict1.keys():
# maxima_dict1[key] = np.asarray(maxima_dict1[key])
# plt.scatter(maxima_dict1[key][:,0],maxima_dict1[key][:,1], c='b', s=10)
# maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
# plt.scatter(maxima_dict1[key][0],maxima_dict1[key][1], c='r', s=30)
#plt.show()
with open('maxima_dict0.pickle', 'wb') as f:
pickle.dump(maxima_dict0, f, protocol=2)
noise_info0 = []
#stack and find average values for all of the above for each station
for key in maxima_dict0.keys():
try:
maxima_dict0[key] = np.asarray(maxima_dict0[key])
maxima_dict0[key] = np.average(maxima_dict0[key], axis=0)
noise_info0.append([stat_locs[key][0],
stat_locs[key][1],
maxima_dict0[key][1]])
except Exception as error:
print error
noise_info0 = np.asarray(noise_info0)
# dump noise_info0
with open('noise_info0.pickle', 'wb') as f:
pickle.dump(noise_info0, f, protocol=2)
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n Australian Networks | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
cm = plt.cm.get_cmap('RdYlBu')
cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm, zorder=2)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/Peak1 PDS Average Maxima 2014.svg', format='SVG')
quit()
noise_info1 = []
#stack and find average values for all of the above for each station
for key in maxima_dict1.keys():
maxima_dict1[key] = np.asarray(maxima_dict1[key])
maxima_dict1[key] = np.average(maxima_dict1[key], axis=0)
noise_info1.append([stat_locs[key][0],
stat_locs[key][1],
maxima_dict1[key][1]])
noise_info1 = np.asarray(noise_info1)
# dump noise_info0 again (noise_info1 itself is pickled after the plot below)
with open('noise_info0.pickle', 'wb') as f:
pickle.dump(noise_info0, f, protocol=2)
fig1 = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise Second Peak Maximum PDS\n Australian Networks | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
cm = plt.cm.get_cmap('RdYlBu')
cmin, cmax = np.min(noise_info1[:,2]), np.max(noise_info1[:,2])
sc = plt.scatter(noise_info1[:,0], noise_info1[:,1], c=noise_info1[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap=cm)
col = plt.colorbar(sc)
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
fig1.savefig('station_pds_maxima/Peak2 PDS Average Maxima 2014.svg', format='SVG')
with open('noise_info1.pickle', 'wb') as f:
pickle.dump(noise_info1, f, protocol=2)
|
gpl-3.0
|
ClimbsRocks/scikit-learn
|
benchmarks/bench_plot_parallel_pairwise.py
|
127
|
1270
|
# Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
|
bsd-3-clause
|
DonBeo/scikit-learn
|
examples/neighbors/plot_nearest_centroid.py
|
264
|
1804
|
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
MaartenGr/BERTopic
|
tests/test_topic_representation.py
|
1
|
4938
|
"""
Unit tests for topic representation
This includes the following features:
* Extracting Topics
* Updating topics after extraction
* Topic reduction
"""
import pytest
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from bertopic.backend._utils import select_backend
from bertopic import BERTopic
newsgroup_docs = fetch_20newsgroups(subset='all')['data'][:1000]
def test_extract_topics():
""" Test Topic Extraction
Test whether topics could be extracted using c-TF-IDF.
Checks are related to the existence of topic representation,
not so much whether they make sense semantically.
"""
nr_topics = 5
documents = pd.DataFrame({"Document": newsgroup_docs,
"ID": range(len(newsgroup_docs)),
"Topic": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})
model = BERTopic()
model.embedding_model = select_backend("paraphrase-MiniLM-L6-v2")
model._update_topic_size(documents)
model._extract_topics(documents)
freq = model.get_topic_freq()
assert model.c_tf_idf.shape[0] == 5
assert model.c_tf_idf.shape[1] > 100
assert isinstance(freq, pd.DataFrame)
assert nr_topics == len(freq.Topic.unique())
assert freq.Count.sum() == len(documents)
assert len(freq.Topic.unique()) == len(freq)
def test_extract_topics_custom_cv():
""" Test Topic Extraction with custom Countvectorizer
Test whether topics could be extracted using c-TF-IDF.
Checks are related to the existence of topic representation,
not so much whether they make sense semantically.
"""
nr_topics = 5
documents = pd.DataFrame({"Document": newsgroup_docs,
"ID": range(len(newsgroup_docs)),
"Topic": np.random.randint(-1, nr_topics-1, len(newsgroup_docs))})
cv = CountVectorizer(ngram_range=(1, 2))
model = BERTopic(vectorizer_model=cv)
model.embedding_model = select_backend("paraphrase-MiniLM-L6-v2")
model._update_topic_size(documents)
model._extract_topics(documents)
freq = model.get_topic_freq()
assert model.c_tf_idf.shape[0] == 5
assert model.c_tf_idf.shape[1] > 100
assert isinstance(freq, pd.DataFrame)
assert nr_topics == len(freq.Topic.unique())
assert freq.Count.sum() == len(documents)
assert len(freq.Topic.unique()) == len(freq)
@pytest.mark.parametrize("reduced_topics", [1, 2, 4, 10])
def test_topic_reduction(reduced_topics):
""" Test Topic Reduction
Test the reduction of topics after having generated
topics. This generation of the initial topics is done
manually as the training takes quite a while.
"""
nr_topics = reduced_topics + 2
model = BERTopic(nr_topics=reduced_topics)
model.embedding_model = select_backend("paraphrase-MiniLM-L6-v2")
topics = np.random.randint(-1, nr_topics-1, len(newsgroup_docs))
old_documents = pd.DataFrame({"Document": newsgroup_docs,
"ID": range(len(newsgroup_docs)),
"Topic": topics})
model.hdbscan_model.labels_ = topics
model._update_topic_size(old_documents)
old_documents = model._sort_mappings_by_frequency(old_documents)
model._extract_topics(old_documents.copy())
old_freq = model.get_topic_freq()
new_documents = model._reduce_topics(old_documents.copy())
new_freq = model.get_topic_freq()
assert old_freq.Count.sum() == new_freq.Count.sum()
assert len(old_freq.Topic.unique()) == len(old_freq)
assert len(new_freq.Topic.unique()) == len(new_freq)
assert isinstance(model.mapped_topics, dict)
assert not set(model.get_topic_freq().Topic).difference(set(new_documents.Topic))
assert model.mapped_topics
def test_topic_reduction_edge_cases():
""" Test Topic Reduction Large Nr Topics
Test whether the topics are not reduced if the reduced number
of topics exceeds the actual number of topics found
"""
nr_topics = 5
topics = np.random.randint(-1, nr_topics - 1, len(newsgroup_docs))
model = BERTopic()
model.embedding_model = select_backend("paraphrase-MiniLM-L6-v2")
model.nr_topics = 100
model.hdbscan_model.labels_ = topics
old_documents = pd.DataFrame({"Document": newsgroup_docs,
"ID": range(len(newsgroup_docs)),
"Topic": topics})
model._update_topic_size(old_documents)
model._extract_topics(old_documents)
old_freq = model.get_topic_freq()
new_documents = model._reduce_topics(old_documents)
new_freq = model.get_topic_freq()
assert not set(old_documents.Topic).difference(set(new_documents.Topic))
pd.testing.assert_frame_equal(old_documents, new_documents)
pd.testing.assert_frame_equal(old_freq, new_freq)
|
mit
|
gengliangwang/spark
|
python/pyspark/pandas/indexes/__init__.py
|
16
|
1065
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.indexes.base import Index # noqa: F401
from pyspark.pandas.indexes.datetimes import DatetimeIndex # noqa: F401
from pyspark.pandas.indexes.multi import MultiIndex # noqa: F401
from pyspark.pandas.indexes.numeric import Float64Index, Int64Index # noqa: F401
|
apache-2.0
|
rcrowder/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py
|
69
|
40833
|
"""
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use matplotlib.widgets.lock(someobj) to prevent other
widgets from drawing.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
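# Illustrative usage sketch: a minimal way to wire a Button to a callback,
# assuming matplotlib.pyplot is importable and an interactive backend is in
# use. The axes placement and label below are arbitrary.
def _example_button_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    bax = fig.add_axes([0.4, 0.45, 0.2, 0.1])  # arbitrary button placement
    button = Button(bax, 'Press me')
    def on_press(event):
        print('button pressed')
    button.on_clicked(on_press)
    plt.show()
    return button  # keep a reference so the callback stays connected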
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
slidermin and slidermax - can be used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
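# Illustrative usage sketch: a minimal Slider wired to an on_changed callback,
# under the same assumptions as the Button sketch above. The axes rectangle
# and value range are arbitrary.
def _example_slider_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    sax = fig.add_axes([0.2, 0.1, 0.6, 0.05])  # arbitrary slider placement
    freq = Slider(sax, 'freq', 0.1, 10.0, valinit=1.0)
    def update(val):
        print('slider moved to %g' % val)
    freq.on_changed(update)
    plt.show()
    return freq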
class CheckButtons(Widget):
"""
A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
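# Illustrative usage sketch: CheckButtons reporting which box was toggled,
# assuming matplotlib.pyplot is importable. Labels and initial states are
# arbitrary.
def _example_checkbuttons_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    cax = fig.add_axes([0.05, 0.4, 0.2, 0.2])  # arbitrary placement
    check = CheckButtons(cax, ('red', 'blue'), (True, False))
    def toggled(label):
        print('toggled %s' % label)
    check.on_clicked(toggled)
    plt.show()
    return check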
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
A tool to adjust the subplot params of fig
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
A horizontal and vertical line that span the axes and move with
the pointer. You can turn off the hline or vline respectively with
the attributes
horizOn =True|False: controls visibility of the horizontal line
vertOn =True|False: controls visibility of the vertical line
And the visibility of the cursor itself with the visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
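# Illustrative usage sketch: attaching a Cursor to an existing axes, assuming
# matplotlib.pyplot is importable. The plotted data and the line properties
# passed through **lineprops are arbitrary.
def _example_cursor_usage():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    t = np.arange(0.0, 2.0, 0.01)
    ax.plot(t, np.sin(2 * np.pi * t))
    cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
    plt.show()
    return cursor  # keep a reference so the event callbacks stay alive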
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
Create a span selector in ax. When a selection is made, clear
the span and call onselect with
onselect(vmin, vmax)
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
Select a rectangular (x and y min/max) region of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
Create a selector in ax. When a selection is made, clear
the drawn box/line and call onselect with
onselect(pos_1, pos_2)
where pos_i are arrays of length 2 containing the
x- and y-coordinate.
If minspanx is not None then events smaller than minspanx
in x direction are ignored (it's the same for y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use drawtype if you want the mouse to draw a line, a box or nothing
between click and actual position by setting
drawtype = 'line', drawtype='box' or drawtype = 'none'.
spancoords is one of 'data' or 'pixels'. If 'data', minspanx
and minspany will be interpreted in the same coordinates as
the x and y axis; if 'pixels', they are in pixels
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
# make the drawn box/line visible and get the click-coordinates,
# button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line and get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
if (self.drawtype=='box') and (xproblems or yproblems):
"""Box to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
if (self.drawtype=='line') and (xproblems and yproblems):
"""Line to small""" # check if drawed distance (if it exists) is
return # not to small in neither x nor y-direction
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
self.eventrelease = None # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
x, y = event.xdata, event.ydata # actual position (with
# button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
|
agpl-3.0
|
Eigenstate/msmbuilder
|
msmbuilder/project_templates/landmarks/find-landmarks.py
|
9
|
1323
|
"""Cluster based on RMSD between conformations
{{header}}
Meta
----
depends:
- meta.pandas.pickl
- trajs
- top.pdb
"""
import mdtraj as md
from msmbuilder.cluster import MiniBatchKMedoids
from msmbuilder.io import load_meta, itertrajs, save_generic, backup
## Set up parameters
kmed = MiniBatchKMedoids(
n_clusters=500,
metric='rmsd',
)
## Load
meta = load_meta()
## Try to limit RAM usage
def guestimate_stride():
total_data = meta['nframes'].sum()
want = kmed.n_clusters * 10
stride = max(1, total_data // want)
print("Since we have", total_data, "frames, we're going to stride by",
stride, "during fitting, because this is probably adequate for",
kmed.n_clusters, "clusters")
return stride
## Fit
kmed.fit([traj for _, traj in itertrajs(meta, stride=guestimate_stride())])
print(kmed.summarize())
## Save
save_generic(kmed, 'clusterer.pickl')
## Save centroids
def frame(traj_i, frame_i):
# Note: kmedoids does 0-based, contiguous integers so we use .iloc
row = meta.iloc[traj_i]
return md.load_frame(row['traj_fn'], frame_i, top=row['top_fn'])
centroids = md.join((frame(ti, fi) for ti, fi in kmed.cluster_ids_),
check_topology=False)
centroids_fn = 'centroids.xtc'
backup(centroids_fn)
centroids.save(centroids_fn)
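## Illustrative follow-up sketch: reloading the saved artifacts in a later
## step. `load_generic` is assumed to be the msmbuilder.io counterpart of
## `save_generic` used above; 'top.pdb' comes from this template's declared
## dependencies.
def reload_landmarks():
    from msmbuilder.io import load_generic
    kmed_reloaded = load_generic('clusterer.pickl')
    centroids_reloaded = md.load(centroids_fn, top='top.pdb')
    return kmed_reloaded, centroids_reloaded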
|
lgpl-2.1
|
NunoEdgarGub1/scikit-learn
|
sklearn/kernel_approximation.py
|
258
|
17973
|
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
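# Illustrative usage sketch: approximating an RBF kernel map and feeding the
# result to a linear classifier. The toy data and the SGDClassifier choice
# are arbitrary assumptions for demonstration only.
def _example_rbf_sampler_usage():
    from sklearn.linear_model import SGDClassifier
    X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    y = [0, 0, 1, 1]
    rbf_feature = RBFSampler(gamma=1, random_state=1)
    X_features = rbf_feature.fit_transform(X)
    clf = SGDClassifier().fit(X_features, y)
    return clf.score(X_features, y)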
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
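# Illustrative usage sketch: expanding non-negative, histogram-like features
# with the additive chi2 map. The random data below is an arbitrary
# assumption for demonstration only.
def _example_additive_chi2_usage():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(5, 3))  # entries must be non-negative
    chi2sampler = AdditiveChi2Sampler(sample_steps=2)
    return chi2sampler.fit_transform(X)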
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
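# Illustrative usage sketch: Nystroem features feeding a linear SVM. The
# digits dataset and the LinearSVC choice are arbitrary assumptions for
# demonstration only.
def _example_nystroem_usage():
    from sklearn import datasets, svm
    digits = datasets.load_digits(n_class=9)
    data = digits.data / 16.
    feature_map = Nystroem(gamma=.2, n_components=300, random_state=1)
    data_transformed = feature_map.fit_transform(data)
    clf = svm.LinearSVC().fit(data_transformed, digits.target)
    return clf.score(data_transformed, digits.target)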
|
bsd-3-clause
|
r-mart/scikit-learn
|
sklearn/metrics/classification.py
|
95
|
67713
|
"""Metrics to assess performance on classification task given classe prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# Jatin Shah <[email protected]>
# Saurabh Jha <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # y_type now contains exactly one value, so the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
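# Illustrative sketch (not part of the original module): a tiny, hypothetical
# helper showing how _check_targets resolves the common target type.
def _example_check_targets():  # safe to remove; illustrative only
    y_type, y_t, y_p = _check_targets([0, 1, 2], [0, 2, 1])
    assert y_type == "multiclass"
    y_type, y_t, y_p = _check_targets([0, 1, 1], [1, 1, 0])
    assert y_type == "binary"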
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
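# Illustrative sketch (not part of the original module): a tiny, hypothetical
# helper showing how _weighted_sum combines per-sample scores.
def _example_weighted_sum():  # safe to remove; illustrative only
    scores = np.array([1.0, 0.0, 1.0])
    weights = np.array([2.0, 1.0, 1.0])
    assert np.isclose(_weighted_sum(scores, None, normalize=True), 2.0 / 3.0)
    assert _weighted_sum(scores, weights) == 3.0   # weighted sum
    assert _weighted_sum(scores, None) == 2.0      # plain sum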
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
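# Illustrative sketch (not part of the original module): perfect agreement
# yields kappa == 1, while two annotators who always disagree on a binary
# task yield kappa == -1 (hypothetical helper, safe to remove).
def _example_cohen_kappa():
    assert cohen_kappa_score([0, 0, 1, 1], [0, 0, 1, 1]) == 1.0
    assert cohen_kappa_score([0, 1, 0, 1], [1, 0, 1, 0]) == -1.0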
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
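# Illustrative sketch (not part of the original module): zero denominators are
# reported as 0.0 (with an UndefinedMetricWarning) instead of propagating
# inf/nan (hypothetical helper, safe to remove).
def _example_prf_divide():
    numerator = np.array([2.0, 0.0])
    denominator = np.array([4.0, 0.0])
    with np.errstate(divide='ignore', invalid='ignore'):
        result = _prf_divide(numerator, denominator, 'precision', 'predicted',
                             average=None, warn_for=('precision',))
    assert list(result) == [0.5, 0.0]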
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision: float (if average is not None) or array of float, shape =\
[n_unique_labels]
    recall: float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score: float (if average is not None) or array of float, shape =\
[n_unique_labels]
support: int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred`` which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
|
bsd-3-clause
|
chares-zhang/kafka
|
system_test/utils/metrics.py
|
89
|
13937
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
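# Illustrative sketch (not part of the original module; the mbean name below
# is hypothetical): the mbean name is flattened into the CSV file name
# produced by the metrics CSV reporter, e.g.
#   getCSVFileNameFromMetricsMbeanName(
#       "kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec")
#   -> "kafka.server.BrokerTopicMetrics.MessagesInPerSec.csv"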
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
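# A hedged sketch of the metrics-definition JSON that read_metrics_definition
# and get_dashboard_definition expect; every name and value below is
# illustrative only, not taken from a real config.
_EXAMPLE_METRICS_DEFINITION = {
    "dashboards": [
        {"name": "Broker dashboard",   # read_metrics_definition walks dashboards
         "role": "broker",             # get_dashboard_definition filters on role
         "graphs": [
             {"graph_name": "Produce requests",
              "bean_name": "kafka:type=kafka.SocketServerStats",
              "attributes": "ProduceRequestsPerSecond",
              "y_label": "requests/sec"}]}]}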
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data from each csv file and plot one curve per (file, label) pair
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
int(csv_reader[0][0])  # sanity check: the first data row's time column must parse as an integer
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
# build all dashboards for the input entities based on their role. The role can be one of kafka, zookeeper, producer
# or consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
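# Hedged usage sketch with an inline, made-up dashboard list: collect the
# distinct mbean names that metrics collection would be started for.
def _example_mbeans_for_role():
    dashboards_for_role = [
        {"role": "broker",
         "graphs": [{"bean_name": "kafka:type=kafka.SocketServerStats",
                     "attributes": "ProduceRequestsPerSecond"}]},
    ]
    assert get_mbeans_for_role(dashboards_for_role) == \
        set(["kafka:type=kafka.SocketServerStats"])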
|
apache-2.0
|
Fireblend/scikit-learn
|
sklearn/metrics/pairwise.py
|
5
|
43701
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
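# A minimal, hedged sketch of the contract enforced above: with Y=None the same
# validated array is returned twice, and mismatched feature counts are rejected
# (the toy arrays are illustrative only).
def _check_pairwise_arrays_sketch():
    import numpy as np
    X = np.array([[0., 1.], [1., 1.]])
    Xc, Yc = check_pairwise_arrays(X, None)
    assert Xc is Yc  # Y defaults to a pointer to the validated X
    try:
        check_pairwise_arrays(X, np.ones((2, 3)))
        raise AssertionError("expected ValueError for mismatched n_features")
    except ValueError:
        pass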
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
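# Hedged sketch of the precomputed-norm shortcut described in the docstring:
# passing Y_norm_squared (here recomputed by hand) matches the default path.
def _euclidean_distances_sketch():
    import numpy as np
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[0., 0.]])
    YY = (Y ** 2).sum(axis=1)[np.newaxis, :]  # shape (1, n_samples_2)
    assert np.allclose(euclidean_distances(X, Y),
                       euclidean_distances(X, Y, Y_norm_squared=YY))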
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
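# Hedged usage sketch on toy data: cosine_distances is 1 - cosine_similarity,
# so identical rows are at distance 0 and orthogonal rows at distance 1.
def _cosine_distances_sketch():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    assert np.allclose(cosine_distances(X), [[0., 1.], [1., 0.]])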
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
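# Hedged sketch of the "paired" convention: distances are taken row-by-row, so
# the output has one value per row pair (toy vectors below).
def _paired_cosine_distances_sketch():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    Y = np.array([[1., 0.], [1., 0.]])
    # first pair is identical (distance 0), second pair is orthogonal (distance 1)
    assert np.allclose(paired_cosine_distances(X, Y), [0., 1.])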
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
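# Hedged numeric sketch of K(X, Y) = (gamma <X, Y> + coef0)^degree on a single
# pair of toy vectors.
def _polynomial_kernel_sketch():
    import numpy as np
    X = np.array([[1., 2.]])
    Y = np.array([[3., 4.]])
    # <X, Y> = 1*3 + 2*4 = 11, so K = (1.0 * 11 + 1)^2 = 144
    assert np.allclose(polynomial_kernel(X, Y, degree=2, gamma=1.0, coef0=1.0),
                       [[144.]])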
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
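# Hedged numeric sketch of K(x, y) = exp(-gamma ||x - y||^2) on two orthogonal
# unit vectors: the diagonal is 1 and the off-diagonal entry is exp(-2 * gamma).
def _rbf_kernel_sketch():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    K = rbf_kernel(X, gamma=0.5)
    assert np.allclose(np.diag(K), [1., 1.])
    assert np.allclose(K[0, 1], np.exp(-0.5 * 2.0))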
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
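# Hedged sketch of the docstring's claim: on L2-normalized rows,
# cosine_similarity coincides with linear_kernel (toy data below).
def _cosine_similarity_sketch():
    import numpy as np
    X = np.array([[1., 2.], [3., 4.]])
    Xn = normalize(X)
    assert np.allclose(cosine_similarity(X), linear_kernel(Xn, Xn))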
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
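# Hedged numeric sketch of both chi-squared kernels on tiny histograms:
# additive_chi2_kernel returns -Sum[(x - y)^2 / (x + y)] and chi2_kernel
# exponentiates gamma times that value.
def _chi2_kernel_sketch():
    import numpy as np
    X = np.array([[1.0, 0.0]])
    Y = np.array([[0.5, 0.5]])
    add = additive_chi2_kernel(X, Y)
    # (1 - 0.5)^2 / 1.5 + (0 - 0.5)^2 / 0.5 = 1/6 + 1/2 = 2/3, negated
    assert np.allclose(add, [[-2.0 / 3.0]])
    assert np.allclose(chi2_kernel(X, Y, gamma=1.0), np.exp(add))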
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
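# Hedged usage sketch of the dispatcher above: a built-in metric string and a
# user-supplied callable produce the same matrix for the toy data below.
def _pairwise_distances_sketch():
    import numpy as np
    X = np.array([[0., 0.], [3., 4.]])
    D1 = pairwise_distances(X, metric="euclidean")
    D2 = pairwise_distances(X, metric=lambda a, b: np.sqrt(np.sum((a - b) ** 2)))
    assert np.allclose(D1, [[0., 5.], [5., 0.]])
    assert np.allclose(D1, D2)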
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both kernel_metrics()
# and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
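# Hedged usage sketch of pairwise_kernels: with filter_params=True, keyword
# arguments a kernel does not accept (gamma for 'linear') are dropped, so the
# call below reduces to linear_kernel(X).
def _pairwise_kernels_sketch():
    import numpy as np
    X = np.array([[1., 0.], [0., 1.]])
    K = pairwise_kernels(X, metric="linear", filter_params=True, gamma=10.0)
    assert np.allclose(K, linear_kernel(X))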
|
bsd-3-clause
|
Jacobsolawetz/trading-with-python
|
lib/interactivebrokers.py
|
77
|
18140
|
"""
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
""" contract factory function """
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = secType
contract.m_exchange = exchange
contract.m_currency = currency
return contract
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
"""
parse trade log in a csv file produced by IB 'Activity Flex Query'
the file should contain these columns:
['Symbol','TradeDate','Quantity','TradePrice','IBCommission']
Returns:
A DataFrame with parsed trade data
"""
import csv
rows = []
with open(fName, 'rb') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']
types = dict(zip(header, [_str2datetime, str, int, float, float]))
idx = dict(zip(header, [rows[0].index(h) for h in header]))
data = dict(zip(header, [[] for h in header]))
for row in rows[1:]:
print row
for col in header:
val = types[col](row[idx[col]])
data[col].append(val)
return DataFrame(data)[header].sort(column='TradeDate')
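# A hedged sketch of the csv layout readActivityFlex expects; the values are
# made up, and the column order may differ because each required column is
# located by name in the header row:
#
#   TradeDate,Symbol,Quantity,TradePrice,IBCommission
#   20130702,SPY,100,161.35,-1.00
#   20130702,XLE,-50,79.12,-1.00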
class Subscriptions(DataFrameModel, Sender):
""" a data table containing price & subscription data """
def __init__(self, tws=None):
super(Subscriptions, self).__init__()
self.df = DataFrame() # this property holds the data in a table format
self._nextId = 1
self._id2symbol = {} # id-> symbol lookup dict
self._header = ['id', 'position', 'bid', 'ask', 'last'] # columns of the _data table
# register callbacks
if tws is not None:
tws.register(self.priceHandler, message.TickPrice)
tws.register(self.accountHandler, message.UpdatePortfolio)
def add(self, symbol, subId=None):
"""
Add a subscription to data table
return : subscription id
"""
if subId is None:
subId = self._nextId
data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
row = DataFrame(data, index=Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
self._nextId = subId + 1
self._rebuildIndex()
self.emit(SIGNAL("layoutChanged()"))
return subId
def priceHandler(self, msg):
""" handler function for price updates. register this with ibConnection class """
if priceTicks[msg.field] not in self._header: # do nothing for ticks that are not in _data table
return
self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price
#notify viewer
col = self._header.index(priceTicks[msg.field])
row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
idx = self.createIndex(row, col)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)
def accountHandler(self, msg):
if msg.contract.m_symbol in self.df.index.tolist():
self.df['position'][msg.contract.m_symbol] = msg.position
def _rebuildIndex(self):
""" udate lookup dictionary id-> symbol """
symbols = self.df.index.tolist()
ids = self.df['id'].values.tolist()
self._id2symbol = dict(zip(ids, symbols))
def __repr__(self):
return str(self.df)
class Broker(object):
"""
Broker class acts as a wrapper around ibConnection
from ibPy. It tracks current subscriptions and provides
data models to viewers.
"""
def __init__(self, name='broker'):
""" initialize broker class
"""
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self.tws = ibConnection() # tws interface
self.nextValidOrderId = None
self.dataModel = Subscriptions(self.tws) # data container
self.tws.registerAll(self.defaultHandler)
#self.tws.register(self.debugHandler,message.TickPrice)
self.tws.register(self.nextValidIdHandler, 'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True, '')
def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
""" subscribe to stock data """
self.log.debug('Subscribing to ' + symbol)
# if symbol in self.data.symbols:
# print 'Already subscribed to {0}'.format(symbol)
# return
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self.dataModel.add(symbol)
self.tws.reqMktData(subId, c, '', False)
self.contracts[symbol] = c
return subId
@property
def data(self):
return self.dataModel.df
def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
""" place an order on already subscribed contract """
if symbol not in self.contracts.keys():
self.log.error("Can't place order, not subscribed to %s" % symbol)
return
action = {-1: 'SELL', 1: 'BUY'}
o = Order()
o.m_orderId = self.getOrderId()
o.m_action = action[cmp(shares, 0)]
o.m_totalQuantity = abs(shares)
o.m_transmit = transmit
if limit is not None:
o.m_orderType = 'LMT'
o.m_lmtPrice = limit
self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))
self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)
def getOrderId(self):
self.nextValidOrderId += 1
return self.nextValidOrderId - 1
def unsubscribeStk(self, symbol):
self.log.debug('Function not implemented')
def disconnect(self):
self.tws.disconnect()
def __del__(self):
"""destructor, clean up """
print 'Broker is cleaning up after itself.'
self.tws.disconnect()
def debugHandler(self, msg):
print msg
def defaultHandler(self, msg):
""" default message handler """
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def nextValidIdHandler(self, msg):
self.nextValidOrderId = msg.orderId
self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))
def saveData(self, fname):
""" save current dataframe to csv """
self.log.debug("Saving data to {0}".format(fname))
self.dataModel.df.to_csv(fname)
# def __getattr__(self, name):
# """ x.__getattr__('name') <==> x.name
# an easy way to call ibConnection methods
# @return named attribute from instance tws
# """
# return getattr(self.tws, name)
class _HistDataHandler(object):
""" handles incoming messages """
def __init__(self, tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler, message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}
def msgHandler(self, msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
""" return downloaded data as a DataFrame """
df = DataFrame(data=self._data, index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self, debug=False):
self._log = logger.getLogger('DLD')
self._log.debug(
'Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler, message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self, msg):
print '[debug]', msg
def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
useRTH=1, formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
formatDate)
self._reqId += 1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self, contract, dateTuple):
""" get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
"""
openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime, closeTime, freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract, t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class TimeKeeper(object):
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~') + '/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
""" adds a timestamp of current request"""
with open(self.dataFile, 'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat) + '\n')
def nrRequests(self, timeSpan=600):
""" return number of requests in past timespan (s) """
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile, 'r') as f:
lines = f.readlines()
for line in lines:
if now - dt.datetime.strptime(line.strip(), self._timeFormat) < delta:
requests += 1
if requests == 0: # erase all contents if no requests are relevant
open(self.dataFile, 'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
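# Illustrative sketch (not part of the original module): how TimeKeeper is
# meant to be used to stay under the pacing limit of 60 historical data
# requests per 10 minutes that Downloader.requestData enforces above. The
# function name is an assumption; nothing in this module calls it.
def demoTimeKeeper():
    tk = TimeKeeper()
    if tk.nrRequests(timeSpan=600) < 60:
        tk.addRequest()
    print 'requests logged in the last 10 minutes:', tk.nrRequests(timeSpan=600)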
#---------------test functions-----------------
def dummyHandler(msg):
print msg
def testConnection():
""" a simple test to check working of streaming prices etc """
tws = ibConnection()
tws.registerAll(dummyHandler)
tws.connect()
c = createContract('SPY')
tws.reqMktData(1, c, '', False)
sleep(3)
print 'testConnection done.'
def testSubscriptions():
s = Subscriptions()
s.add('SPY')
#s.add('XLE')
print s
def testBroker():
b = Broker()
sleep(2)
b.subscribeStk('SPY')
b.subscribeStk('XLE')
b.subscribeStk('GOOG')
b.placeOrder('ABC', 125, 55.1)
sleep(3)
return b
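def testHistData():
    """ illustrative sketch only: fetch one day of SPY bars with the
    Downloader defined above. Assumes a TWS/IB Gateway session is running
    and accepting API connections; the date is an arbitrary example. """
    dl = Downloader(debug=False)
    c = createContract('SPY')
    df = dl.getIntradayData(c, (2014, 5, 8))
    dl.disconnect()
    print df.head()
    return df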
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
def __init__(self, parent=None):
super(AddSubscriptionDlg, self).__init__(parent)
symbolLabel = QLabel('Symbol')
self.symbolEdit = QLineEdit()
secTypeLabel = QLabel('secType')
self.secTypeEdit = QLineEdit('STK')
exchangeLabel = QLabel('exchange')
self.exchangeEdit = QLineEdit('SMART')
currencyLabel = QLabel('currency')
self.currencyEdit = QLineEdit('USD')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
lay = QGridLayout()
lay.addWidget(symbolLabel, 0, 0)
lay.addWidget(self.symbolEdit, 0, 1)
lay.addWidget(secTypeLabel, 1, 0)
lay.addWidget(self.secTypeEdit, 1, 1)
lay.addWidget(exchangeLabel, 2, 0)
lay.addWidget(self.exchangeEdit, 2, 1)
lay.addWidget(currencyLabel, 3, 0)
lay.addWidget(self.currencyEdit, 3, 1)
lay.addWidget(buttonBox, 4, 0, 1, 2)
self.setLayout(lay)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
def __init__(self, broker, parent=None):
super(BrokerWidget, self).__init__(parent)
self.broker = broker
self.dataTable = TableView()
self.dataTable.setModel(self.broker.dataModel)
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
#self.dataTable.resizeColumnsToContents()
dataLabel = QLabel('Price Data')
dataLabel.setBuddy(self.dataTable)
dataLayout = QVBoxLayout()
dataLayout.addWidget(dataLabel)
dataLayout.addWidget(self.dataTable)
addButton = QPushButton("&Add Symbol")
saveDataButton = QPushButton("&Save Data")
#deleteButton = QPushButton("&Delete")
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(addButton)
buttonLayout.addWidget(saveDataButton)
buttonLayout.addStretch()
layout = QHBoxLayout()
layout.addLayout(dataLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
#self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)
def addSubscription(self):
dialog = AddSubscriptionDlg(self)
if dialog.exec_():
self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))
def saveData(self):
""" save data to a .csv file """
fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
if fname:
self.broker.saveData(fname)
# def deleteSubscription(self):
# pass
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(640, 480)
self.setWindowTitle('Broker test')
self.broker = Broker()
self.broker.subscribeStk('SPY')
self.broker.subscribeStk('XLE')
self.broker.subscribeStk('GOOG')
brokerWidget = BrokerWidget(self.broker, self)
lay = QVBoxLayout()
lay.addWidget(brokerWidget)
self.setLayout(lay)
def startGui():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
import ib
    print 'ibpy version:', ib.version
#testConnection()
#testBroker()
#testSubscriptions()
print message.messageTypeNames()
startGui()
print 'All done'
|
bsd-3-clause
|
black-silence/PlanetNomadsSavegameEditor
|
GUI.py
|
1
|
26222
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import ttk, filedialog, messagebox, colorchooser
from tkinter.scrolledtext import ScrolledText
from typing import Text
import shutil
import os
import re
import _tkinter
from PlanetNomads import Savegame
import platform
import zipfile
import sqlite3
import json
from math import sqrt
version = '1.3.0'
try:
from mpl_toolkits.mplot3d import Axes3D # Required for projection='3d'
import matplotlib.pyplot as plt
import numpy as np
enable_map = True
except ImportError:
enable_map = False
class GUI(Frame):
current_file = None
savegame = None
locked_buttons = []
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
parent.title("Planet Nomads Savegame Editor %s" % version)
# Toolbar
gui_toolbar_frame = ttk.Frame(parent, padding="5 5 5 5")
gui_toolbar_frame.pack(fill="both", expand=True)
gui_load_file_button = ttk.Button(gui_toolbar_frame, text="Select file", command=self.select_file)
gui_load_file_button.grid(row=0, column=0, sticky=(E, W))
gui_backup_button = ttk.Button(gui_toolbar_frame, text="Create backup", command=self.create_backup)
gui_backup_button.grid(row=0, column=1, sticky=(E, W))
self.locked_buttons.append(gui_backup_button)
self.gui_restore_button = ttk.Button(gui_toolbar_frame, text="Restore backup", command=self.restore_backup)
self.gui_restore_button.grid(row=0, column=2, sticky=(E, W))
self.gui_restore_button.state(["disabled"]) # Restore button is unlocked separately
gui_export_save_button = ttk.Button(gui_toolbar_frame, text="Export file", command=self.export_save)
gui_export_save_button.grid(row=1, column=1, sticky=(E, W))
self.locked_buttons.append(gui_export_save_button)
gui_import_save_button = ttk.Button(gui_toolbar_frame, text="Import file", command=self.import_save)
gui_import_save_button.grid(row=1, column=2, sticky=(E, W))
# content
gui_main_frame = ttk.Frame(parent, padding="5 5 5 5")
gui_main_frame.grid_rowconfigure(0, weight=1)
gui_main_frame.grid_columnconfigure(0, weight=1)
gui_main_frame.pack(fill="both", expand=True)
gui_tabs = ttk.Notebook(gui_main_frame)
gui_tabs.grid(sticky=(N, E, S, W))
# status
gui_status_frame = ttk.Frame(parent, relief="sunken", padding="2 2 2 2")
gui_status_frame.pack(fill="both", expand=True)
self.gui_status = ScrolledText(gui_status_frame, state='disabled', width=40, height=5, wrap='none')
self.gui_status.pack(expand=True, fill="both")
# Tabs after status bar to enable message display
gui_tabs.add(self.init_basic_buttons(gui_main_frame), text="Basic tools")
gui_tabs.add(self.init_machine_buttons(gui_main_frame), text="Machine tools")
gui_tabs.add(self.init_cheat_buttons(gui_main_frame), text="Cheats")
gui_tabs.add(self.init_dev_buttons(gui_main_frame), text="Dev tools")
for button in self.locked_buttons:
button.state(["disabled"])
def init_machine_buttons(self, gui_main_frame):
frame = ttk.Frame(gui_main_frame)
self.machine_select_options = ["Select machine"]
self.gui_selected_machine_identifier = StringVar(self.parent)
self.gui_selected_machine_identifier.set(self.machine_select_options[0])
self.gui_selected_machine_identifier.trace('w', self.on_machine_selected)
self.gui_machine_select = ttk.Combobox(frame, textvariable=self.gui_selected_machine_identifier,
values=self.machine_select_options, state='readonly')
self.gui_machine_select.grid(sticky=(E, W))
self.locked_buttons.append(self.gui_machine_select)
# Teleport area
teleport_tools = ttk.Frame(frame)
teleport_tools.grid(sticky=(N, E, S, W))
gui_push_machine_button = ttk.Button(teleport_tools, text="Teleport selected machine",
command=self.teleport_machine)
gui_push_machine_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_push_machine_button)
label = ttk.Label(teleport_tools, text=" to ")
label.grid(row=0, column=1)
self.gui_teleport_distance = IntVar(self.parent)
self.gui_teleport_distance.set(20)
self.gui_teleport_distance_button = ttk.Entry(teleport_tools, textvariable=self.gui_teleport_distance,
justify="center", width=5)
self.gui_teleport_distance_button.grid(row=0, column=2)
label = ttk.Label(teleport_tools, text=" meters over ")
label.grid(row=0, column=3)
options = ["current position"]
self.gui_teleport_machine_target = StringVar(self.parent)
self.gui_teleport_machine_target.set(options[0])
self.gui_teleport_target_button = ttk.OptionMenu(teleport_tools, self.gui_teleport_machine_target, *options)
self.gui_teleport_target_button.grid(row=0, column=4, sticky=(E, W))
self.locked_buttons.append(self.gui_teleport_target_button)
# Recolor area
color_grid = ttk.Frame(frame)
color_grid.grid(sticky=(N, E, S, W))
gui_randomize_color = ttk.Button(color_grid, text="Randomize colors", command=self.randomize_machine_color)
gui_randomize_color.grid(row=0, column=2, sticky=(E, W))
self.locked_buttons.append(gui_randomize_color)
gui_change_color = ttk.Button(color_grid, text="Paint all blocks", command=self.change_machine_color)
gui_change_color.grid(row=0, sticky=(E, W))
self.locked_buttons.append(gui_change_color)
gui_change_color = ttk.Button(color_grid, text="Paint grey blocks", command=self.replace_machine_color)
gui_change_color.grid(row=0, column=1, sticky=(E, W))
self.locked_buttons.append(gui_change_color)
return frame
def update_machine_select(self, machines):
self.machine_select_options = ["Select machine"]
target = self.gui_teleport_target_button["menu"]
target.delete(0, "end")
target.add_command(label="current position")
machine_list = []
for m in machines:
type = m.get_type()
name_id = m.get_name_or_id()
if type == "Construct" and m.get_name() == "":
continue # 300+ wrecks are just too much
machine_list.append("{} {} [{}]".format(type, name_id, m.identifier))
target.add_command(label="{} {}".format(type, name_id),
command=lambda value=m.identifier: self.gui_teleport_machine_target.set(value))
machine_list.sort()
self.machine_select_options.extend(machine_list)
self.gui_machine_select["values"] = self.machine_select_options
self.gui_selected_machine_identifier.set("Select machine")
self.gui_teleport_machine_target.set("current position")
def init_dev_buttons(self, gui_main_frame):
gui_dev_tools_frame = ttk.Frame(gui_main_frame)
gui_inventory_button = ttk.Button(gui_dev_tools_frame, text="List player inventory",
command=self.list_inventory)
gui_inventory_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_inventory_button)
gui_machines_button = ttk.Button(gui_dev_tools_frame, text="List machines", command=self.list_machines)
gui_machines_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_machines_button)
gui_teleport_northpole_button = ttk.Button(gui_dev_tools_frame,
text="Teleport player to north pole (death possible)",
command=self.teleport_northpole)
gui_teleport_northpole_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_teleport_northpole_button)
return gui_dev_tools_frame
def init_basic_buttons(self, gui_main_frame):
gui_basic_tools_frame = ttk.Frame(gui_main_frame)
if enable_map:
gui_draw_map_button = ttk.Button(gui_basic_tools_frame, text="Draw map", command=self.draw_map)
gui_draw_map_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_draw_map_button)
else:
self.update_statustext("Install numpy + matplotlib to enable the map!")
gui_unlock_button = ttk.Button(gui_basic_tools_frame, text="Unlock all recipes", command=self.unlock_recipes)
gui_unlock_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_unlock_button)
gui_northbeacon_button = ttk.Button(gui_basic_tools_frame, text="Create north pole beacon",
command=self.create_north_beacon)
gui_northbeacon_button.grid(row=0, column=1, sticky=(E, W))
self.locked_buttons.append(gui_northbeacon_button)
gui_southbeacon_button = ttk.Button(gui_basic_tools_frame, text="Create GPS beacons",
command=self.create_gps_beacons)
gui_southbeacon_button.grid(row=1, column=1, sticky=(E, W))
self.locked_buttons.append(gui_southbeacon_button)
return gui_basic_tools_frame
def get_selected_machine_id(self, warn=True):
"""Return selected machine id or print status message"""
machine_id = self.gui_selected_machine_identifier.get()
if machine_id == "Select machine":
if warn:
self.update_statustext("Select a machine first")
return
x = re.search(r'\[(\d+)]$', machine_id)
return int(x.group(1))
def get_selected_machine(self, warn=True):
machine_id = self.get_selected_machine_id(warn)
if not machine_id:
return
for machine in self.savegame.machines:
if machine.identifier == machine_id:
return machine
def on_machine_selected(self, *args):
machine = self.get_selected_machine(False)
if not machine:
return
machine_coords = machine.get_coordinates()
player_coords = self.savegame.get_player_position()
x = machine_coords[0] - player_coords[0]
y = machine_coords[1] - player_coords[1]
z = machine_coords[2] - player_coords[2]
distance = sqrt(x**2 + y**2 + z**2)
self.update_statustext("Selected machine %s, distance to player %.1f" % (machine.get_name_or_id(), distance))
def teleport_machine(self):
machine_id = self.get_selected_machine_id()
if not machine_id:
return
target = self.gui_teleport_machine_target.get()
if target == "current position":
target_id = None
else:
target_id = int(target)
try:
distance = self.gui_teleport_distance.get()
except _tkinter.TclError:
self.update_statustext("Please use only numbers in the teleport distance")
return
target_machine = None
active_machine = None
for machine in self.savegame.machines:
if machine.identifier == machine_id:
active_machine = machine
if not target_id:
target_machine = machine # Relative to its current position
if target_machine:
break # We found both or do not need a target
if machine.identifier == target_id:
target_machine = machine
if active_machine:
break # We found both
if not active_machine:
self.update_statustext("Something broke, did not find machine")
return
active_machine.teleport(distance, target_machine)
self.update_statustext("Machine {} teleported".format(active_machine.get_name_or_id()))
self.savegame.save()
def init_cheat_buttons(self, gui_main_frame):
gui_cheats_frame = ttk.Frame(gui_main_frame)
gui_resource_menu = Menu(gui_cheats_frame, tearoff=0)
gui_resource_menu.add_command(label="Aluminium", command=lambda: self.create_item(51))
gui_resource_menu.add_command(label="Biomass Container", command=lambda: self.create_item(392745))
gui_resource_menu.add_command(label="Carbon", command=lambda: self.create_item(49))
gui_resource_menu.add_command(label="Cobalt", command=lambda: self.create_item(60))
gui_resource_menu.add_command(label="Iron", command=lambda: self.create_item(56))
gui_resource_menu.add_command(label="Silicium", command=lambda: self.create_item(52))
gui_resource_menu.add_command(label="Silver", command=lambda: self.create_item(59))
gui_resource_menu.add_command(label="Titanium", command=lambda: self.create_item(57))
gui_resource_menu.add_command(label="Uranium", command=lambda: self.create_item(61))
gui_resource_menu.add_command(label="Enriched Uranium", command=lambda: self.create_item(63))
gui_resource_menubutton = ttk.Menubutton(gui_cheats_frame, text="Cheat: add stack of resource",
menu=gui_resource_menu)
gui_resource_menubutton.grid(sticky=(E, W))
self.locked_buttons.append(gui_resource_menubutton)
gui_item_menu = Menu(gui_cheats_frame, tearoff=0)
gui_item_menu.add_command(label="Basic Frame", command=lambda: self.create_item(69))
gui_item_menu.add_command(label="Composite 1", command=lambda: self.create_item(78))
gui_item_menu.add_command(label="Mechanical 1", command=lambda: self.create_item(76))
gui_item_menu.add_command(label="Plating", command=lambda: self.create_item(67))
gui_item_menu.add_command(label="Standard Electronics", command=lambda: self.create_item(73))
gui_item_menubutton = ttk.Menubutton(gui_cheats_frame, text="Cheat: add stack of item", menu=gui_item_menu)
gui_item_menubutton.grid(sticky=(E, W))
self.locked_buttons.append(gui_item_menubutton)
gui_unlock_button = ttk.Button(gui_cheats_frame, text="Cheat: give Mk4 equipment",
command=self.create_mk4_equipment)
gui_unlock_button.grid(sticky=(E, W))
self.locked_buttons.append(gui_unlock_button)
return gui_cheats_frame
def teleport_northpole(self):
if self.savegame.teleport_player(0, self.savegame.get_planet_size() + 250, 0):
self.update_statustext("Player teleported")
def update_statustext(self, message: str):
self.gui_status.config(state=NORMAL)
self.gui_status.insert(END, message + "\n")
self.gui_status.see(END)
self.gui_status.config(state=DISABLED)
def select_file(self):
"""
Show file select dialog
:return: None
"""
opts = {"filetypes": [("PN save files", "save_*.db"), ("All files", ".*")]}
os_name = platform.system()
if os_name == "Linux":
opts["initialdir"] = os.path.expanduser("~/.config/unity3d/Craneballs/PlanetNomads/")
elif os_name == "Windows":
opts["initialdir"] = os.path.expanduser("~\AppData\LocalLow\Craneballs\PlanetNomads")
# TODO MAC > USERS > [Your Username] > Library > Application Support > unity.Craneballs.PlanetNomads
filename = filedialog.askopenfilename(**opts)
if not filename:
return
self.load_file(filename)
def load_file(self, filename: Text):
"""
Load file
:type filename: Filename with absolute path
"""
self.current_file = filename
self.savegame = Savegame.Savegame()
self.savegame.load(self.current_file)
self.update_statustext("Loaded game '{}'".format(self.savegame.get_name()))
# Enable some buttons once a file is loaded
for button in self.locked_buttons:
button.state(["!disabled"])
if self.backup_exists(filename):
self.gui_restore_button.state(["!disabled"])
else:
self.gui_restore_button.state(["disabled"])
self.update_machine_select(self.savegame.machines)
def backup_exists(self, filename: Text) -> bool:
"""
Check if a backup exists for the given file
:param filename: Filename with absolute path
:return: bool
"""
return os.path.exists(filename + ".bak")
def create_backup(self):
if self.backup_exists(self.current_file):
if not messagebox.askokcancel("Overwrite existing backup?", "A backup already exists. Overwrite it?"):
return
try:
shutil.copy2(self.current_file, self.current_file + ".bak")
except IOError:
messagebox.showerror(message="Could not create backup file!")
else:
messagebox.showinfo("Backup created", "Backup was created")
self.gui_restore_button.state(["!disabled"])
def restore_backup(self):
res = messagebox.askokcancel("Please confirm", "Are you sure you want to restore the backup?")
if not res:
return
try:
shutil.copy2(self.current_file + ".bak", self.current_file)
self.savegame.reset()
except IOError:
messagebox.showerror(message="Could not restore backup file!")
else:
messagebox.showinfo("Backup restore", "Backup was restored")
def list_machines(self):
for m in self.savegame.machines:
print(m)
def unlock_recipes(self):
if self.savegame.unlock_recipes():
self.update_statustext("All blocks unlocked")
else:
self.update_statustext("Nothing unlocked. Is this a survival save?")
def create_north_beacon(self):
self.savegame.create_north_pole_beacon()
self.update_statustext("Beacon created with nav point C")
def create_gps_beacons(self):
self.savegame.create_gps_beacons()
self.update_statustext("3 beacons created, north pole + 2x equator")
def list_inventory(self):
inventory = self.savegame.get_player_inventory()
stacks = inventory.get_stacks()
for slot in stacks:
print("Slot {}: {} {}".format(slot, stacks[slot].get_count(), stacks[slot].get_item_name()))
def create_item(self, item_id, amount=90):
inventory = self.savegame.get_player_inventory()
if not inventory:
self.update_statustext("Could not load inventory")
return
item = Savegame.Item(item_id)
if not inventory.add_stack(item, amount):
messagebox.showerror(message="Could not create resource. All slots full?")
return
self.update_statustext("Added {} to inventory".format(item.get_name()))
inventory.save()
def create_mk4_equipment(self):
self.create_item(118, 1)
self.create_item(114, 1)
self.create_item(110, 1)
def draw_map(self):
# Based on https://stackoverflow.com/questions/11140163/python-matplotlib-plotting-a-3d-cube-a-sphere-and-a-vector
plt.style.use('seaborn-whitegrid')
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
# Draw a sphere to mimic a planet
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = self.savegame.get_planet_size() * np.outer(np.cos(u), np.sin(v))
y = self.savegame.get_planet_size() * np.outer(np.sin(u), np.sin(v))
z = self.savegame.get_planet_size() * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rcount=18, ccount=21, alpha=0.1)
try:
selected_machine_id = int(self.gui_selected_machine_identifier.get())
except ValueError:
selected_machine_id = 0
colors = {"Base": "blue", "Vehicle": "orange", "Construct": "grey", "Selected": "red"}
markers = {"Base": "^", "Vehicle": "v", "Construct": ".", "Selected": "v"}
machines = self.savegame.machines
coords = {}
for m in machines:
c = m.get_coordinates()
mtype = m.get_type()
if m.identifier == selected_machine_id:
mtype = "Selected"
if mtype not in coords:
coords[mtype] = {"x": [], "y": [], "z": []}
coords[mtype]["x"].append(c[0])
coords[mtype]["y"].append(c[2]) # Flip y/z
coords[mtype]["z"].append(c[1])
for mtype in coords:
ax.scatter(np.array(coords[mtype]["x"]), np.array(coords[mtype]["y"]), np.array(coords[mtype]["z"]),
c=colors[mtype], marker=markers[mtype], label=mtype)
player = self.savegame.get_player_position()
ax.scatter(np.array(player[0]), np.array(player[2]), np.array(player[1]), c="red", marker="*", label="Player")
ax.grid(False) # Hide grid lines
ax.legend()
plt.show()
def randomize_machine_color(self):
machine = self.get_selected_machine()
if not machine:
return
machine.randomize_color()
self.update_statustext("Machine {} color changed".format(machine.get_name_or_id()))
self.savegame.save()
def change_machine_color(self):
machine = self.get_selected_machine()
if not machine:
return
col = colorchooser.askcolor()
if not col[0]:
return
machine.set_color(col[0])
self.update_statustext("Machine {} color changed".format(machine.get_name_or_id()))
self.savegame.save()
def replace_machine_color(self):
machine = self.get_selected_machine()
if not machine:
return
col = colorchooser.askcolor()
if not col[0]:
return
# Default color is (180, 180, 180), left upper in PN color chooser
machine.set_color(col[0], (180, 180, 180))
self.update_statustext("Machine {} color changed".format(machine.get_name_or_id()))
self.savegame.save()
def export_save(self):
zipdir = os.path.dirname(self.current_file)
save_id = re.search(r"_(\d+)\.db$", self.current_file).group(1)
# Load _main.db for meta data
dbconnector = sqlite3.connect(os.path.join(zipdir, "_main.db"))
maindb = dbconnector.cursor()
maindb.row_factory = sqlite3.Row
maindb.execute("select * from saves where id = ?", (save_id,))
row = maindb.fetchone()
metadata = {}
for k in row.keys():
metadata[k] = row[k]
del (metadata["id"], metadata["thumbnail"])
# Generate a safe name from the saved game title
zipname = "{}.pnsave.zip".format(re.sub(r"[^a-zA-Z0-9._-]+", "-", metadata["name"]))
fullname = os.path.join(zipdir, zipname)
# Open zip file and write save and meta. Try to compress it
try:
myzip = zipfile.ZipFile(fullname, "w", zipfile.ZIP_BZIP2)
except RuntimeError:
myzip = zipfile.ZipFile(fullname, "w")
stripped_name = "save_00.db"
myzip.write(self.current_file, arcname=stripped_name)
myzip.writestr("meta.json", json.dumps(metadata))
myzip.close()
self.update_statustext("Exported current save to %s\n in %s" % (zipname, zipdir))
def import_save(self):
# Select import file
opts = {"filetypes": [("PN export files", "*.pnsave.zip"), ("All files", ".*")]}
os_name = platform.system()
if os_name == "Linux":
opts["initialdir"] = os.path.expanduser("~/.config/unity3d/Craneballs/PlanetNomads/")
elif os_name == "Windows":
opts["initialdir"] = os.path.expanduser("~\AppData\LocalLow\Craneballs\PlanetNomads")
# TODO mac
importfilename = filedialog.askopenfilename(**opts)
if not importfilename:
return
# See if the _main.db is in the same directory, or let user select correct directory
importdir = os.path.dirname(importfilename)
mainfile = os.path.join(importdir, "_main.db")
if not os.path.exists(mainfile):
mainfile = os.path.join(str(opts["initialdir"]), "_main.db")
if not os.path.exists(mainfile):
opts["filetypes"] = [("PN main database", "_main.db"), ("All files", ".*")]
mainfile = filedialog.askopenfilename(**opts)
if not mainfile:
return
if not os.path.exists(mainfile):
self.update_statustext("Can't import without _main.db")
return
# Load _main.db
dbconnector = sqlite3.connect(mainfile)
maindb = dbconnector.cursor()
maindb.execute("select max(id) from saves")
next_id = maindb.fetchone()[0] + 1
# Load zip
try:
with zipfile.ZipFile(importfilename) as myzip:
myzip.extract("save_00.db")
# TODO detect correct file name scheme from existing saves
shutil.move("save_00.db", os.path.join(os.path.dirname(mainfile), "save_%i.db" % next_id))
metajson = myzip.read("meta.json")
meta = json.loads(metajson)
maindb.execute(
"insert into saves (type, id_master_autosave, name, created, modified, base_seed_string, "
"world_name) values (:type, -1, :name, :created, :modified, :base_seed_string, :world_name)",
meta)
dbconnector.commit()
self.update_statustext("Imported %s game '%s' as id %i" % (meta["type"], meta["name"], next_id))
except RuntimeError:
self.update_statustext("Could not load the exported file.\n Maybe the bzip2 extension is missing.")
except OSError:
self.update_statustext("Could not create the file")
if __name__ == "__main__":
window = Tk()
app = GUI(window)
window.mainloop()
|
mit
|
yyjiang/scikit-learn
|
sklearn/cross_decomposition/pls_.py
|
187
|
28507
|
"""
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
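# Illustrative sketch (not part of scikit-learn): checks the claim in the
# _nipals_twoblocks_inner_loop docstring that the inner loop recovers the
# first singular vectors of X'Y, by comparing it with _svd_cross_product on
# random centered data. The function name, sizes and tolerance are
# assumptions for demonstration; nothing in the module calls this helper.
def _demo_inner_loop_matches_svd(n=50, p=6, q=4, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n, p)
    Y = rng.randn(n, q)
    X -= X.mean(axis=0)
    Y -= Y.mean(axis=0)
    # norm_y_weights=True makes both NIPALS weight vectors unit norm, so
    # they are comparable to the SVD singular vectors up to a sign flip.
    x_w, y_w, n_iter = _nipals_twoblocks_inner_loop(X, Y, mode="A",
                                                    norm_y_weights=True)
    u, v = _svd_cross_product(X, Y)
    agree = (np.allclose(np.abs(x_w), np.abs(u), atol=1e-2) and
             np.allclose(np.abs(y_w), np.abs(v), atol=1e-2))
    return agree, n_iter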
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
    allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
        (i) The outer loop iterates over components.
        (ii) The inner loop estimates the weight vectors. This can be done
        with two algorithms: (a) the inner loop of the original NIPALS algo., or
        (b) an SVD on residual cross-covariance matrices.
n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
# copy since this will contains the residuals (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid allocating a data chunk for the rank-one approximation
            # matrix that is then subtracted from Xk, one could perform a
            # column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
# U = Y C(Q'C)^-1 = YC* (W* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
        # fit() creates the fitted attributes, so no check_is_fitted call
        # belongs here; checking before fitting would make fit_transform
        # fail on a fresh estimator.
        return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
This implementation provides the same results that 3 PLS packages
provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
Whether the deflation should be done on a copy. Let the default
value to True unless you don't care about side effect
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression. But slightly different than the CCA. This is mostly used
for modeling.
This implementation provides the same results that the "plspm" package
provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
    ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly implement
    the Wold algorithm, since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
Simply perform a svd on the crosscovariance matrix: X'Y
There are no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
# copy since this will contains the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
Training vectors, where n_samples in the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
Training vectors, where n_samples in the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
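if __name__ == "__main__":
    # Illustrative check, not part of scikit-learn: the PLSCanonical
    # docstring states that algorithm="nipals" and algorithm="svd" should
    # give similar results up to numerical errors. This reuses the small
    # data set from the docstring examples; the tolerance is an assumption.
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    nipals = PLSCanonical(n_components=2, algorithm="nipals").fit(X, Y)
    svd = PLSCanonical(n_components=2, algorithm="svd").fit(X, Y)
    agree = np.allclose(np.abs(nipals.x_scores_), np.abs(svd.x_scores_),
                        atol=1e-2)
    print("nipals and svd x_scores agree up to sign: %s" % agree)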
|
bsd-3-clause
|
mhue/scikit-learn
|
examples/neighbors/plot_regression.py
|
349
|
1402
|
"""
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
|
bsd-3-clause
|
JT5D/scikit-learn
|
benchmarks/bench_glmnet.py
|
297
|
3848
|
"""
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10  # integer division: count of informative features
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10  # integer division: count of informative features
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
amosonn/distributed
|
distributed/joblib.py
|
2
|
2934
|
from __future__ import print_function, division, absolute_import
from distutils.version import LooseVersion
from tornado import gen
from .client import Client, _wait
from .utils import ignoring
# A user could have installed joblib, sklearn, both, or neither. Further, only
# joblib >= 0.10.0 supports backends, so we also need to check for that. This
# bit of logic is to ensure that we create and register the backend for all
# viable installations of joblib.
joblib = sk_joblib = None
with ignoring(ImportError):
import joblib
if LooseVersion(joblib.__version__) < '0.10.2':
joblib = None
with ignoring(ImportError):
import sklearn.externals.joblib as sk_joblib
if LooseVersion(sk_joblib.__version__) < '0.10.2':
sk_joblib = None
if joblib:
from joblib.parallel import (ParallelBackendBase,
AutoBatchingMixin)
elif sk_joblib:
from sklearn.externals.joblib.parallel import (
ParallelBackendBase, AutoBatchingMixin)
else:
raise RuntimeError("Joblib backend requires either `joblib` >= '0.10.2' "
" or `sklearn` > '0.17.1'. Please install or upgrade")
class DaskDistributedBackend(ParallelBackendBase, AutoBatchingMixin):
MIN_IDEAL_BATCH_DURATION = 0.2
MAX_IDEAL_BATCH_DURATION = 1.0
def __init__(self, scheduler_host='127.0.0.1:8786', loop=None):
self.client = Client(scheduler_host, loop=loop)
self.futures = set()
def configure(self, n_jobs=1, parallel=None, **backend_args):
return self.effective_n_jobs(n_jobs)
def effective_n_jobs(self, n_jobs=1):
return sum(self.client.ncores().values())
def apply_async(self, func, *args, **kwargs):
callback = kwargs.pop('callback', None)
kwargs['pure'] = False
future = self.client.submit(func, *args, **kwargs)
self.futures.add(future)
@gen.coroutine
def callback_wrapper():
result = yield _wait([future])
self.futures.remove(future)
callback(result) # gets called in separate thread
self.client.loop.add_callback(callback_wrapper)
future.get = future.result # monkey patch to achieve AsyncResult API
return future
def abort_everything(self, ensure_ready=True):
# Tell the client to cancel any task submitted via this instance
# as joblib.Parallel will never access those results.
self.client.cancel(self.futures)
self.futures.clear()
DistributedBackend = DaskDistributedBackend
# Register the backend with any available versions of joblib
if joblib:
joblib.register_parallel_backend('distributed', DaskDistributedBackend)
joblib.register_parallel_backend('dask.distributed', DaskDistributedBackend)
if sk_joblib:
sk_joblib.register_parallel_backend('distributed', DaskDistributedBackend)
sk_joblib.register_parallel_backend('dask.distributed', DaskDistributedBackend)
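# A minimal usage sketch (never called here): once the backend is registered,
# joblib's parallel_backend context manager can route Parallel work through a
# running dask scheduler. The scheduler address and the toy workload below are
# illustrative assumptions, not part of this module's API.
def _example_distributed_joblib(scheduler_address='127.0.0.1:8786'):
    from joblib import Parallel, delayed, parallel_backend
    with parallel_backend('dask.distributed', scheduler_host=scheduler_address):
        # square a handful of integers on the cluster workers
        return Parallel(n_jobs=-1)(delayed(pow)(i, 2) for i in range(10))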
|
bsd-3-clause
|
iut-ibk/DynaMind-UrbanSim
|
3rdparty/opus/src/opus_core/generic_model_explorer.py
|
2
|
9976
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import zeros, take, ones
from opus_core.misc import unique
from opus_core.datasets.dataset import DatasetSubset
from opus_core.variables.variable_name import VariableName
class GenericModelExplorer(object):
def get_model(self):
"""Return a model object."""
return self.model_system.run_year_namespace["model"]
def get_dataset(self, dataset_name):
"""Return a Dataset object of the given name."""
ds = self.model_system.run_year_namespace.get(dataset_name, None)
if ds is None:
ds = self.model_system.run_year_namespace["datasets"][dataset_name]
return ds
def get_data(self, coefficient, submodel=-2):
"""Calls method get_data of the Model object. Should return a data array for the
        given coefficient and submodel. Can be used only in models that are estimable."""
return self.get_model().get_data(coefficient, submodel)
def get_coefficient_names(self, submodel=-2):
"""Calls method get_coefficient_names of the Model object which should return
        coefficient names for the given submodel. Can be used only in models that are estimable."""
return self.get_model().get_coefficient_names(submodel)
def get_data_as_dataset(self, submodel=-2):
"""Calls method get_data_as_dataset of the Model object which should return
an object of class Dataset containing model data.
Works only for ChoiceModel (returns InteractionDataset),
and for RegressionModel (returns Dataset).
"""
return self.get_model().get_data_as_dataset(submodel)
def get_choice_set(self):
"""Return a Dataset of choices. Works only for the ChoiceModel class.
"""
return self.get_model().model_interaction.interaction_dataset.get_dataset(2)
def get_choice_set_index(self):
"""Return an array of indices of choices. Works only for the ChoiceModel class.
"""
return self.get_model().model_interaction.interaction_dataset.get_index(2)
def get_choice_set_index_for_submodel(self, submodel):
"""Return an array of indices of choices for the given submodel.
Works only for the ChoiceModel class.
"""
index = self.get_choice_set_index()
return take (index, indices=self.get_agent_set_index_for_submodel(submodel), axis=0)
def get_agent_set(self):
"""Return a Dataset of all agents. Works only for the ChoiceModel class.
"""
return self.get_model().model_interaction.interaction_dataset.get_dataset(1)
def get_agent_set_index(self):
"""Return an array of indices of agents that are the choosers.
Works only for the ChoiceModel class.
"""
return self.get_model().model_interaction.interaction_dataset.get_index(1)
def get_agent_set_index_for_submodel(self, submodel):
"""Return an array of indices of agents for the given submodel that are the choosers.
Works only for the ChoiceModel class.
"""
model = self.get_model()
return model.observations_mapping[submodel]
def get_active_agent_set(self):
"""Return agent set that make choices in the model.
Works only for the ChoiceModel class.
"""
agents = self.get_agent_set()
return DatasetSubset(agents, self.get_agent_set_index())
def _get_before_after_dataset_from_attribute(self, var_name, storage, **kwargs):
dataset_name = var_name.get_dataset_name()
ds = self.get_dataset(dataset_name)
ds.copy_attribute_by_reload(var_name, storage=storage, **kwargs)
return ds
def get_before_after_attribute(self, attribute_name):
"""Return a dictionary with elements 'before' (contains an array of the given attribute
that is reloaded from the cache) and 'after' (contains an array of the given attribute
with the current values).
"""
from opus_core.store.attribute_cache import AttributeCache
var_name = VariableName(attribute_name)
storage = AttributeCache(self.simulation_state.get_cache_directory())
ds = self._get_before_after_dataset_from_attribute(var_name, storage=storage,
package_order=self.model_system.run_year_namespace["dataset_pool"].get_package_order())
return {'after': ds.get_attribute(var_name.get_alias()),
'before': ds.get_attribute('%s_reload__' % var_name.get_alias())}
def summary_before_after(self, attribute_name):
"""Print summary of the given attribute 'before' (values
reloaded from the cache) and 'after' (current values).
"""
from opus_core.store.attribute_cache import AttributeCache
var_name = VariableName(attribute_name)
storage = AttributeCache(self.simulation_state.get_cache_directory())
ds = self._get_before_after_dataset_from_attribute(var_name, storage=storage,
package_order=self.model_system.run_year_namespace["dataset_pool"].get_package_order())
print ''
print 'Before model run:'
print '================='
ds.summary(names=['%s_reload__' % var_name.get_alias()])
print ''
print 'After model run:'
print '================='
ds.summary(names=[var_name.get_alias()])
def model_dependencies(self):
"""Prints out variable dependencies for the model."""
from opus_core.variables.dependency_query import DependencyChart
model, group = self.get_model_name()
chart = DependencyChart(self.xml_configuration, model=model, model_group=group,
specification=self.get_specification())
chart.print_model_dependencies()
def variable_dependencies(self, name):
"""Prints out dependencies of this variable. 'name' can be either an alias from
the model specification or an expression."""
from opus_core.variables.dependency_query import DependencyChart
varname = None
allvars = self.get_specification().get_variable_names()
for ivar in range(len(allvars)):
thisvar = allvars[ivar]
if not isinstance(thisvar, VariableName):
thisvar = VariableName(thisvar)
if name == thisvar.get_alias():
varname = thisvar
break
if varname is None:
varname = VariableName(name)
chart = DependencyChart(self.xml_configuration)
chart.print_dependencies(varname.get_expression())
def compute_expression(self, attribute_name):
"""Compute any expression and return its values."""
var_name = VariableName(attribute_name)
dataset_name = var_name.get_dataset_name()
ds = self.get_dataset(dataset_name)
return ds.compute_variables([var_name], dataset_pool=self.model_system.run_year_namespace["dataset_pool"])
def plot_histogram_before_after(self, attribute_name, bins=None):
"""Plot histograms of values returned by the method get_before_after_attribute."""
from opus_core.plot_functions import create_histogram, show_plots
from matplotlib.pylab import figure
values = self.get_before_after_attribute(attribute_name)
alias = VariableName(attribute_name).get_alias()
fig = figure()
fig.add_subplot(121)
create_histogram(values['before'], main='%s (before)' % alias, bins=bins)
fig.add_subplot(122)
create_histogram(values['after'], main='%s (after)' % alias, bins=bins)
show_plots()
def get_correlation(self, submodel=-2):
"""Return an array of correlations between all variables of the model data (for given submodel).
Works only for ChoiceModel and RegressionModel"""
ds = self.get_data_as_dataset(submodel)
attrs = [attr for attr in ds.get_known_attribute_names() if attr not in ds.get_id_name()]
return ds.correlation_matrix(attrs)
def get_model_name(self):
""" Must be defined in a child class. A tuple of model name and model group.
"""
raise NotImplementedError('get_model_name')
def plot_correlation(self, submodel=-2, useR=False, **kwargs):
"""Plot correlations between all variables of the model data (for given submodel).
Works only for ChoiceModel and RegressionModel"""
ds = self.get_data_as_dataset(submodel)
attrs = [attr for attr in ds.get_known_attribute_names() if attr not in ds.get_id_name()]
ds.correlation_image(attrs, useR=useR, **kwargs)
def plot_choice_set(self):
"""Plot map of the sampled choice set."""
choice_set = self.get_choice_set()
result = zeros(choice_set.size(), dtype='int16')
result[unique(self.get_choice_set_index().ravel())] = 1
dummy_attribute_name = '__sampled_choice_set__'
choice_set.add_attribute(name=dummy_attribute_name, data=result)
choice_set.plot_map(dummy_attribute_name, background=-1)
choice_set.delete_one_attribute(dummy_attribute_name)
def plot_choice_set_attribute(self, name):
"""Plot map of the given attribute for the sampled choice set."""
choice_set = self.get_choice_set()
filter_var = ones(choice_set.size(), dtype='int16')
filter_var[unique(self.get_choice_set_index().ravel())] = 0
dummy_attribute_name = '__sampled_choice_set_filter__'
choice_set.add_attribute(name=dummy_attribute_name, data=filter_var)
choice_set.plot_map(name, filter=dummy_attribute_name)
choice_set.delete_one_attribute(dummy_attribute_name)
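# A hedged usage sketch (illustrative only): concrete explorers are expected to
# provide model_system, simulation_state, xml_configuration and
# get_model_name(); the subclass, attribute and submodel names below are
# hypothetical.
#
#     explorer = MyModelExplorer(...)
#     explorer.summary_before_after('zone.number_of_jobs')
#     explorer.plot_correlation(submodel=1)
#     explorer.variable_dependencies('ln_cost')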
|
gpl-2.0
|
hrichstein/Stellar_mass_env_Density
|
Codes/Scripts/clean_copy.py
|
1
|
75233
|
from __future__ import division, absolute_import
import astropy.stats
import glob
import math
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import os
import pandas as pd
from scipy import optimize,spatial
###############################################################################
###############################################################################
###############################################################################
__author__ =['Victor Calderon']
__copyright__ =["Copyright 2016 Victor Calderon, Index function"]
__email__ =['[email protected]']
__maintainer__ =['Victor Calderon']
def Index(directory, datatype):
"""
Indexes the files in a directory `directory' with a
specific data type.
Parameters
----------
directory: str
Absolute path to the folder that is indexed.
datatype: str
Data type of the files to be indexed in the folder.
Returns
-------
file_array: array_like
np.array of indexed files in the folder 'directory'
with specific datatype.
Examples
--------
>>> Index('~/data', '.txt')
    >>> array(['A.txt', 'Z.txt', ...])
"""
assert(os.path.exists(directory))
files = np.array(glob.glob('{0}/*{1}'.format(directory, datatype)))
return files
###############################################################################
def myceil(x, base=10):
"""
Returns the upper-bound integer of 'x' in base 'base'.
Parameters
----------
x: float
number to be approximated to closest number to 'base'
base: float
base used to calculate the closest 'largest' number
Returns
-------
n_high: float
Closest float number to 'x', i.e. upper-bound float.
Example
-------
>>>> myceil(12,10)
20
>>>>
>>>> myceil(12.05, 0.1)
12.10000
"""
n_high = float(base*math.ceil(float(x)/base))
return n_high
###############################################################################
def myfloor(x, base=10):
"""
Returns the lower-bound integer of 'x' in base 'base'
Parameters
----------
x: float
number to be approximated to closest number of 'base'
base: float
base used to calculate the closest 'smallest' number
Returns
-------
n_low: float
Closest float number to 'x', i.e. lower-bound float.
Example
-------
>>>> myfloor(12, 5)
>>>> 10
"""
n_low = float(base*math.floor(float(x)/base))
return n_low
###############################################################################
def Bins_array_create(arr, base=10):
"""
Generates array between [arr.min(), arr.max()] in steps of `base`.
Parameters
----------
arr: array_like, Shape (N,...), One-dimensional
Array of numerical elements
base: float, optional (default=10)
Interval between bins
Returns
-------
bins_arr: array_like
Array of bin edges for given arr
"""
base = float(base)
arr = np.array(arr)
assert(arr.ndim==1)
arr_min = myfloor(arr.min(), base=base)
arr_max = myceil( arr.max(), base=base)
bins_arr = np.arange(arr_min, arr_max+0.5*base, base)
return bins_arr
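# Illustrative check (values assumed): with log-masses spanning roughly
# 9.3--11.2 and base=0.2, the edges run from myfloor(min) to myceil(max) in
# 0.2-wide steps.
_bins_check = Bins_array_create(np.array([9.3, 10.0, 11.2]), base=0.2)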
###############################################################################
###############################################################################
###############################################################################
def sph_to_cart(ra,dec,cz):
"""
Converts spherical coordinates to Cartesian coordinates.
Parameters
----------
ra: array-like
right-ascension of galaxies in degrees
dec: array-like
declination of galaxies in degrees
cz: array-like
velocity of galaxies in km/s
Returns
-------
coords: array-like, shape = N by 3
x, y, and z coordinates
"""
cz_dist = cz/70. #converts velocity into distance
x_arr = cz_dist*np.cos(np.radians(ra))*np.cos(np.radians(dec))
y_arr = cz_dist*np.sin(np.radians(ra))*np.cos(np.radians(dec))
z_arr = cz_dist*np.sin(np.radians(dec))
coords = np.column_stack((x_arr,y_arr,z_arr))
return coords
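# Worked check under the H0 = 70 km/s/Mpc assumption baked into sph_to_cart:
# a galaxy at ra = dec = 0 with cz = 7000 km/s lands at (100, 0, 0) Mpc.
_cart_check = sph_to_cart(np.array([0.0]), np.array([0.0]), np.array([7000.0]))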
############################################################################
def calc_dens(n_val,r_val):
"""
Returns densities of spheres with radius being the distance to the
nth nearest neighbor.
Parameters
----------
n_val = integer
The 'N' from Nth nearest neighbor
r_val = array-like
An array with the distances to the Nth nearest neighbor for
each galaxy
Returns
-------
dens: array-like
An array with the densities of the spheres created with radii
to the Nth nearest neighbor.
"""
dens = np.array([(3.*(n_val+1)/(4.*np.pi*r_val[hh]**3)) \
for hh in range(len(r_val))])
return dens
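# Worked check of the density formula: with n_val = 1 and a neighbor distance
# of 2 Mpc, the sphere encloses (n_val + 1) = 2 objects, giving
# 3*2/(4*pi*2**3) ~= 0.0597 objects per Mpc^3.
_dens_check = calc_dens(1, np.array([2.0]))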
###############################################################################
def plot_calcs(mass,bins,dlogM,mass_err=False,ratio_err=False):
"""
Returns values for plotting the stellar mass function and
mass ratios
Parameters
----------
mass: array-like
A 1D array with mass values, assumed to be in order
    bins: array-like
A 1D array with the values which will be used as the bin edges
by the histogram function
dlogM: float-like
The log difference between bin edges
Optional
--------
mass_err == True
Calculates the Poisson errors on the stellar mass function.
Returns mass_freq as a list with 2 array elements, the first being
the stellar mass function values, the second being the errors.
ratio_err == True
Calculates the Poisson errors on the density-based, mass ratios.
Creates empty list and appends ratio error arrays to it as they
are generated. Returns ratio_dict as a list. The first element is
a dictionary with the ratio values to be plotted. The second is a
three element list. Each element is an array with the error values
for each of the three density-cut ratios.
Returns
-------
bin_centers: array-like
        An array with the median mass values of the mass bins
    mass_freq: array-like
Contains the number density values of each mass bin
ratio_dict: dictionary-like
A dictionary with three keys, corresponding to the divisors
2,4, and 10 (as the percentile cuts are based on these
divisions). Each key has the density-cut, mass ratios for
that specific cut (50/50 for 2; 25/75 for 4; 10/90 for 10).
"""
mass_counts, edges = np.histogram(mass,bins)
bin_centers = 0.5*(edges[:-1]+edges[1:])
mass_freq = mass_counts/float(len(mass))/dlogM
ratio_dict = {}
frac_val = [2,4,10]
if ratio_err == True:
yerr = []
for ii in frac_val:
ratio_dict[ii] = {}
# Calculations for the lower density cut
frac_data = int(len(mass)/ii)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
# Calculations for the higher density cut
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
# Ratio determination
ratio_counts = (1.*counts_2)/(1.*counts)
ratio_dict[ii] = ratio_counts
if ratio_err == True:
yerr.append((counts_2*1.)/(counts*1.)*\
np.sqrt(1./counts + 1./counts_2))
if mass_err == True:
mass_freq_list = [[] for xx in xrange(2)]
mass_freq_list[0] = mass_freq
mass_freq_list[1] = np.sqrt(mass_counts)/float(len(mass))/dlogM
mass_freq = np.array(mass_freq_list)
if ratio_err == True:
ratio_dict_list = [[] for xx in range(2)]
ratio_dict_list[0] = ratio_dict
ratio_dict_list[1] = yerr
ratio_dict = ratio_dict_list
return bin_centers, mass_freq, ratio_dict
###############################################################################
def bin_func(mass_dist,bins,kk,bootstrap=False):
"""
Returns median distance to Nth nearest neighbor
Parameters
----------
mass_dist: array-like
An array with mass values in at index 0 (when transformed) and distance
to the Nth nearest neighbor in the others
Example: 6239 by 7
Has mass values and distances to 6 Nth nearest neighbors
    bins: array-like
A 1D array with the values which will be used as the bin edges
kk: integer-like
The index of mass_dist (transformed) where the appropriate distance
array may be found
Optional
--------
bootstrap == True
Calculates the bootstrap errors associated with each median distance
value. Creates an array housing arrays containing the actual distance
values associated with every galaxy in a specific bin. Bootstrap error
is then performed using astropy, and upper and lower one sigma values
are found for each median value. These are added to a list with the
median distances, and then converted to an array and returned in place
of just 'medians.'
Returns
-------
medians: array-like
An array with the median distance to the Nth nearest neighbor from
all the galaxies in each of the bins
"""
edges = bins
# print 'length bins:'
# print len(bins)
digitized = np.digitize(mass_dist.T[0],edges)
digitized -= int(1)
bin_nums = np.unique(digitized)
bin_nums_list = list(bin_nums)
# if 12 not in bin_nums:
# bin_nums.append(12)
# if 13 in bin_nums:
    #     bin_nums.remove(13)
# if 13 not in bin_nums:
# bin_nums.append(13)
# if 14 in bin_nums:
# bin_nums.remove(14)
for jj in range(len(bins)-1):
if jj not in bin_nums:
bin_nums_list.append(jj)
# print 'appended'
# print bin_nums_list
if (len(bins)-1) in bin_nums_list:
bin_nums_list.remove(len(bins)-1)
# print 'removed'
# print bin_nums_list
if (len(bins)) in bin_nums_list:
bin_nums_list.remove(len(bins))
# print 'removed'
# print bin_nums_list
bin_nums = np.array(bin_nums_list)
for ii in bin_nums:
if len(mass_dist.T[kk][digitized==ii]) == 0:
temp_list = list(mass_dist.T[kk]\
[digitized==ii])
temp_list.append(np.zeros(len(bin_nums)))
mass_dist.T[kk][digitized==ii] = np.array(temp_list)
# print bin_nums
# print len(bin_nums)
medians = np.array([np.median(mass_dist.T[kk][digitized==ii]) \
for ii in bin_nums])
# print len(medians)
if bootstrap == True:
dist_in_bin = np.array([(mass_dist.T[kk][digitized==ii]) \
for ii in bin_nums])
for vv in range(len(dist_in_bin)):
if len(dist_in_bin[vv]) == 0:
dist_in_bin_list = list(dist_in_bin[vv])
dist_in_bin[vv] = np.zeros(len(dist_in_bin[0]))
low_err_test = np.array([np.percentile(astropy.stats.bootstrap\
(dist_in_bin[vv],bootnum=1000,bootfunc=np.median),16) \
for vv in range(len(dist_in_bin))])
high_err_test = np.array([np.percentile(astropy.stats.bootstrap\
(dist_in_bin[vv],bootnum=1000,bootfunc=np.median),84) \
for vv in range(len(dist_in_bin))])
        med_list = [[] for yy in range(3)]  # medians, 16th, 84th percentile limits
med_list[0] = medians
med_list[1] = low_err_test
med_list[2] = high_err_test
medians = np.array(med_list)
return medians
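# Shape note (a sketch, not executed): bin_func(nn_mass_dist[0], bins, 1) gives
# one median distance per mass bin; with bootstrap=True the return stacks the
# medians with the bootstrap 16th and 84th percentile limits, in that order,
# into a (3, n_bins) array.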
###############################################################################
def hist_calcs(mass,bins,dlogM,eco=False):
"""
Returns dictionaries with the counts for the upper
and lower density portions; calculates the
three different percentile cuts for each mass
array given
Parameters
----------
mass: array-like
        A 1D array with log stellar mass values, assumed to be sorted
        in order of ascending density (necessary, as the index cuts are
        based on this)
bins: array-like
A 1D array with the values which will be used as the bin edges
dlogM: float-like
The log difference between bin edges
Returns
-------
hist_dict_low: dictionary-like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the lower density cut
    hist_dict_high: dictionary-like
A dictionary with three keys (the frac vals), with arrays
as values. The values for the higher density cut
"""
hist_dict_low = {}
hist_dict_high = {}
frac_val = np.array([2,4,10])
frac_dict = {2:0,4:1,10:2}
if eco == True:
low_err = [[] for xx in xrange(len(frac_val))]
high_err = [[] for xx in xrange(len(frac_val))]
for ii in frac_val:
hist_dict_low[ii] = {}
hist_dict_high[ii] = {}
frac_data = int(len(mass)/ii)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
low_counts = (counts/float(len(frac_mass))/dlogM)
if eco == True:
low_err[frac_dict[ii]] = np.sqrt(counts)/len(frac_mass)/dlogM
hist_dict_low[ii] = low_counts
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
if eco == True:
high_err[frac_dict[ii]] = np.sqrt(counts_2)/len(frac_mass_2)/\
dlogM
hist_dict_high[ii] = high_counts
if eco == True:
hist_dict_low['low_err'] = low_err
hist_dict_high['high_err'] = high_err
return hist_dict_low, hist_dict_high
###############################################################################
def plot_all_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx):
"""
Returns a plot showing the density-cut, mass ratio. Optimally
used with a well-initiated for-loop
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the ratio values for each mass bin
neigh_val: integer-like
Value which will be inserted into the text label of each plot
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Returns
-------
Figure with three subplots showing appropriate ratios
"""
if plot_idx ==16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
if col_num ==0:
title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==1:
title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==2:
title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
ax.set_xlim(9.1,11.9)
ax.set_ylim([0,5])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([1.,3.])
ax.tick_params(axis='both', labelsize=12)
ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
ax.plot(bin_centers,y_vals,color='silver')
###############################################################################
def plot_eco_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx,only=False):
"""
Returns subplots of ECO density-cut,mass ratios
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the ratio values for each mass bin
neigh_val: integer-like
Value which will be inserted into the text label of each plot
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Optional
--------
only == True
To be used when only plotting the ECO ratios, no mocks.
Will add in the additional plotting specifications that
would have been taken care of previously in a for-loop
which plotted the mocks as well
Returns
-------
ECO ratios plotted to any previously initialized figure
"""
if only == True:
if plot_idx ==16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
if col_num ==0:
title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==1:
title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num ==2:
title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
ax.set_xlim(9.1,11.9)
ax.set_ylim([0,5])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([1.,3.])
ax.tick_params(axis='both', labelsize=12)
ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0)
    frac_vals = np.array([2,4,10])
    # col_num indexes frac_vals (0 -> 2, 1 -> 4, 2 -> 10), matching y_vals[1]
    y_vals_2 = y_vals[0][frac_vals[col_num]]
    ax.errorbar(bin_centers,y_vals_2,yerr=y_vals[1][col_num],\
        color='dodgerblue',linewidth=2)
###############################################################################
def plot_hists(mass,neigh_val,bins,dlogM,ax,col_num,plot_idx):
"""
Returns a plot showing the density-cut, mass counts.
Parameters
----------
mass: array-like
A 1D array with log stellar mass values
neigh_val: integer-like
Value which will be inserted into the text label of each plot
bins: array-like
A 1D array with the values which will be used as the bin edges
dlogM: float-like
The log difference between bin edges
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
col_num: integer-like
Integer which specifies which column is currently being
plotted. Used for labelling subplots
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
labeling the x-axis
Returns
-------
Figure with two curves, optionally (if uncommented) plotted in step
"""
ax.set_yscale('log')
if col_num==0:
title_label = 'Mass 50/50, {0} NN'.format(neigh_val)
frac_val = 2
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num==1:
title_label = 'Mass 25/75, {0} NN'.format(neigh_val)
frac_val = 4
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
elif col_num==2:
title_label = 'Mass 10/90, {0} NN'.format(neigh_val)
frac_val = 10
ax.text(0.05, 0.95, title_label,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=12)
if plot_idx == 16:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.set_xlim(9.1,11.9)
ax.set_ylim([10**-3,10**1])
ax.set_xticks(np.arange(9.5, 12., 0.5))
ax.set_yticks([10**-2,10**0])
    frac_data = int(len(mass)/frac_val)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
low_counts = (counts/float(len(frac_mass))/dlogM)
bins_cens = .5*(edges[:-1]+edges[1:])
# ax.step(bins_cens, low_counts, color='lightslategrey',where='mid',\
# alpha=0.1)
ax.plot(bins_cens, low_counts, color='lightslategrey',alpha=0.1)
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
high_counts = (counts_2/float(len(frac_mass_2))/dlogM)
# ax.step(bins_cens, high_counts, color='lightslategray',where='mid',\
# alpha=0.1)
ax.plot(bins_cens, high_counts, color='lightslategray',alpha=0.1)
# res = np.array([low_counts,high_counts])
# return res
###############################################################################
def plot_eco_hists(mass,bins,dlogM,ax,col,plot_idx):
if col==0:
frac_val = 2
elif col==1:
frac_val = 4
elif col==2:
frac_val = 10
    frac_data = int(len(mass)/frac_val)
frac_mass = mass[0:frac_data]
counts, edges = np.histogram(frac_mass,bins)
bins_cens = .5*(edges[:-1]+edges[1:])
ax.step(bins_cens, (counts/float(len(frac_mass))/dlogM), color='lime',\
where='mid',label='Lower')
frac_mass_2 = mass[-frac_data:]
counts_2, edges_2 = np.histogram(frac_mass_2,bins)
ax.step(bins_cens, (counts_2/float(len(frac_mass_2))/dlogM), \
color='dodgerblue',where='mid',label='Higher')
if plot_idx == 0:
ax.legend(loc='best')
###############################################################################
def plot_all_meds(bin_centers,y_vals,ax,plot_idx):
"""
Returns six subplots showing the median distance to
the Nth nearest neighbor for each mass bin. Assumes a
previously defined figure. Best used in a for-loop
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the median distance values for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
the text label in each subplot
Returns
-------
Subplots displaying the median distance to Nth nearest neighbor
trends for each mass bin
"""
titles = [1,2,3,5,10,20]
ax.set_ylim(0,10**1.5)
ax.set_xlim(9.1,11.9)
ax.set_yscale('symlog')
ax.set_xticks(np.arange(9.5,12.,0.5))
ax.set_yticks(np.arange(0,12,1))
ax.set_yticklabels(np.arange(1,11,2))
ax.tick_params(axis='x', which='major', labelsize=16)
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20)
ax.plot(bin_centers,y_vals,color='silver')
#############################################################################
def plot_eco_meds(bin_centers,y_vals,low_lim,up_lim,ax,plot_idx,only=False):
"""
Returns six subplots showing the median Nth nearest neighbor distance for
ECO galaxies in each mass bin
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
y_vals: array-like
An array containing the median distance values for each mass bin
low_lim: array-like
An array with the lower cut-off of the bootstrap errors for each median
up_lim: array-like
An array with the upper cut-off of the bootstrap errors for each median
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
plot_idx: integer-like
Specifies which subplot the figure is plotted to. Used for
the text label in each subplot
Optional
--------
    only == True
To be used when only plotting the ECO median trends,
no mocks. Will add in the additional plotting
specifications that would have been taken care of
previously in a for-loop which plotted the mocks as well
Returns
-------
Subplots displaying the median distance to Nth nearest neighbor
trends for each mass bin, with the bootstrap errors
"""
if only == True:
titles = [1,2,3,5,10,20]
ax.set_ylim(0,10**1.5)
ax.set_xlim(9.1,11.9)
ax.set_yscale('symlog')
ax.set_xticks(np.arange(9.5,12.,0.5))
ax.tick_params(axis='both', which='major', labelsize=16)
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.errorbar(bin_centers,y_vals,yerr=0.1,lolims=low_lim,\
uplims=up_lim,color='dodgerblue',label='ECO')
# if plot_idx == 5:
# ax.legend(loc='best')
###############################################################################
def plot_bands(bin_centers,upper,lower,ax):
"""
    Returns an overlaid, fill-between plot, creating a band
between the different mock catalog values plotted
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
upper: array-like
Array with the max y-values among all the mocks
for each mass bin
lower: array-like
Array with the min y-values among all the mocks
for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
Returns
-------
A semi-transparent band overlaying the area of the plot
    bordered by the mocks
"""
ax.fill_between(bin_centers,upper,lower,color='silver',alpha=0.1)
###############################################################################
def plot_med_range(bin_centers,low_lim,up_lim,ax,alpha,color='gray'):
"""
Returns a plot with a transparent band highlighting a range of
values.
Parameters
----------
bin_centers: array-like
        An array with the median mass values of the mass bins
low_lim: array-like
Array with the min y-values among all the mocks
for each mass bin
up_lim: array-like
Array with the max y-values among all the mocks
for each mass bin
ax: axis-like
A value which specifies which axis each subplot is to be
plotted to
alpha: float-like
        A value which will determine the transparency of the band
color: str
Any color which Python recognizes; sets the color of the band
Returns
-------
A band spanning from the max y-values to the minimum.
"""
ax.fill_between(bin_centers,low_lim,up_lim,color=color,alpha=alpha)
##############################################################################
##############################################################################
##############################################################################
dirpath = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
dirpath += r"\Catalogs\Resolve_plk_5001_so_mvir_scatter_ECO_Mocks_"
dirpath += r"scatter_mocks\Resolve_plk_5001_so_mvir_scatter0p1_ECO_Mocks"
usecols = (0,1,8,13)
dlogM = 0.2
##Add in column 4, HALO ID
##############################################################################
##############################################################################
##############################################################################
ECO_cats = (Index(dirpath,'.dat'))
names = ['ra','dec','cz','logMstar']
PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= usecols,header=None,\
skiprows=2,names=names)) for ii in range(len(ECO_cats))]
PD_comp = [(PD[ii][PD[ii].logMstar >= 9.1]) for ii in range(len(ECO_cats))]
# PD_test = [(PD[ii][PD[ii].logMstar >= 11.7]) for ii in range(len(ECO_cats))]
# for ii in range(len(PD_test)):
# print len(PD_test[ii])
min_max_mass_arr = []
for ii in range(len(PD_comp)):
min_max_mass_arr.append(max(PD_comp[ii].logMstar))
min_max_mass_arr.append(min(PD_comp[ii].logMstar))
min_max_mass_arr = np.array(min_max_mass_arr)
bins = Bins_array_create(min_max_mass_arr,dlogM)
bins+= 0.1
bins_list = list(bins)
for ii in bins:
if ii > 11.7:
bins_list.remove(ii)
bins = np.array(bins_list)
num_of_bins = int(len(bins) - 1)
ra_arr = np.array([(np.array(PD_comp[ii])).T[0] \
for ii in range(len(PD_comp))])
dec_arr = np.array([(np.array(PD_comp[ii])).T[1] \
for ii in range(len(PD_comp))])
cz_arr = np.array([(np.array(PD_comp[ii])).T[2] \
for ii in range(len(PD_comp))])
mass_arr = np.array([(np.array(PD_comp[ii])).T[3] \
for ii in range(len(PD_comp))])
coords_test = np.array([sph_to_cart(ra_arr[vv],dec_arr[vv],cz_arr[vv]) \
for vv in range(len(ECO_cats))])
neigh_vals = np.array([1,2,3,5,10,20])
nn_arr = [[] for xx in xrange(len(coords_test))]
nn_arr_nn = [[] for yy in xrange(len(neigh_vals))]
for vv in range(len(coords_test)):
nn_arr[vv] = spatial.cKDTree(coords_test[vv])
nn_arr[vv] = np.array(nn_arr[vv].query(coords_test[vv],21)[0])
nn_specs = [(np.array(nn_arr).T[ii].T[neigh_vals].T) for ii in \
range(len(coords_test))]
nn_mass_dist = np.array([(np.column_stack((mass_arr[qq],nn_specs[qq]))) \
for qq in range(len(coords_test))])
###############################################################################
sat_cols = (13,25)
sat_names = ['logMstar','cent_sat_flag']
SF_PD = [(pd.read_csv(ECO_cats[ii],sep="\s+", usecols= sat_cols,\
header=None,
skiprows=2,names=sat_names)) for ii in range(8)]
SF_PD_comp = [(SF_PD[ii][SF_PD[ii].logMstar >= 9.1]) for ii in \
range(len(ECO_cats))]
sats_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==0])) \
for ii in range(len(SF_PD_comp))])
cents_num = np.array([(len(SF_PD_comp[ii][SF_PD_comp[ii].cent_sat_flag==1])) \
for ii in range(len(SF_PD_comp))])
gal_tot = np.array([(len(SF_PD_comp[ii])) for ii in range(len(SF_PD_comp))])
print 'SAT_FRAC = {0}'.format(sats_num/gal_tot)
###############################################################################
nn_dist = {}
nn_dens = {}
mass_dat = {}
ratio_info = {}
mass_freq = [[] for xx in xrange(len(coords_test))]
for ii in range(len(coords_test)):
nn_dist[ii] = {}
nn_dens[ii] = {}
mass_dat[ii] = {}
ratio_info[ii] = {}
nn_dist[ii]['mass'] = nn_mass_dist[ii].T[0]
for jj in range(len(neigh_vals)):
nn_dist[ii][(neigh_vals[jj])] = np.array(nn_mass_dist[ii].T\
[range(1,len(neigh_vals)+1)[jj]])
nn_dens[ii][(neigh_vals[jj])] = np.column_stack((nn_mass_dist[ii].T\
[0],calc_dens(neigh_vals[jj],\
nn_mass_dist[ii].T[range(1,len\
(neigh_vals)+1)[jj]])))
idx = np.array([nn_dens[ii][neigh_vals[jj]].T[1].argsort()])
mass_dat[ii][(neigh_vals[jj])] = (nn_dens[ii][neigh_vals[jj]]\
[idx].T[0])
bin_centers, mass_freq[ii], ratio_info[ii][neigh_vals[jj]] = \
plot_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
all_mock_meds = [[] for xx in range(len(nn_mass_dist))]
for vv in range(len(nn_mass_dist)):
all_mock_meds[vv] = np.array([bin_func(nn_mass_dist[vv],bins,(jj+1)) \
for jj in range(len(nn_mass_dist[vv].T)-1)])
med_plot_arr = [([[] for yy in xrange(len(nn_mass_dist))]) \
for xx in xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
med_plot_arr[ii][jj] = all_mock_meds[jj][ii]
# for ii in range(len(neigh_vals)):
# for jj in range(len(nn_mass_dist)):
# print len(all_mock_meds[jj][ii])
mass_freq_plot = (np.array(mass_freq))
max_lim = [[] for xx in range(len(mass_freq_plot.T))]
min_lim = [[] for xx in range(len(mass_freq_plot.T))]
for jj in range(len(mass_freq_plot.T)):
max_lim[jj] = max(mass_freq_plot.T[jj])
min_lim[jj] = min(mass_freq_plot.T[jj])
###############################################################################
# ordered_mass = nn_mass_dist[0].T[0][(nn_mass_dist[0].T[0].argsort())]
# dist_cont = [[[[] for zz in xrange(len(bins)-1)] for yy in \
# xrange(len(nn_mass_dist))] for xx in \
# xrange(1,len(nn_mass_dist[0].T))]
# for ii in xrange(len(nn_mass_dist)):
# sorting_test = np.digitize(nn_mass_dist[ii].T[0],bins)
# bin_nums = np.unique(sorting_test)
# bin_nums_list = list(bin_nums)
# # if 13 not in bin_nums:
# # bin_nums_list.append(13)
# # if 14 not in bin_nums:
# # bin_nums_list.append(14)
# # bin_nums = np.array(bin_nums_list)
# # if 14 in bin_nums_list:
# # bin_nums_list.remove(14)
# # bin_nums = np.array(bin_nums_list)
# for dd in range(1,num_of_bins+1):
# if dd not in bin_nums:
# bin_nums_list.append(dd)
# if len(bins) in bin_nums_list:
# bin_nums_list.remove(len(bins))
# bin_nums = np.array(bin_nums_list)
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# dist_cont[jj-1][ii][hh-1] = (nn_mass_dist[ii].T[jj]\
# [sorting_test==hh])
# if len(dist_cont[jj-1][ii][hh-1]) == 0:
# (dist_cont[jj-1][ii][hh-1]) = list(dist_cont[jj-1][ii][hh-1])
# (dist_cont[jj-1][ii][hh-1]).append(np.zeros\
# (len(dist_cont[1][0][0])))
# (dist_cont[jj-1][ii][hh-1]) = np.array((dist_cont[jj-1][ii]\
# [hh-1]))
# for ii in xrange(len(nn_mass_dist)):
# for jj in xrange(1,len(nn_mass_dist[ii].T)):
# for hh in bin_nums:
# print len(dist_cont[jj-1][ii][hh-1])
###############################################################################
# top_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_68 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# top_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# low_95 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# med_50 = [[[[]for ll in xrange(len(bin_nums))]for yy in \
# xrange(len(nn_mass_dist))] for xx in xrange(len(neigh_vals))]
# for aa in xrange(len(neigh_vals)):
# for bb in xrange(len(nn_mass_dist)):
# for cc in xrange(len(dist_cont[aa][bb])):
# top_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],84)
# low_68[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],16)
# top_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],97.5)
# low_95[aa][bb][cc] = np.percentile(dist_cont[aa][bb][cc],2.5)
# med_50[aa][bb][cc] = np.median((dist_cont[aa][bb][cc]))
# top_68 = np.array(top_68)
# low_68 = np.array(low_68)
# top_95 = np.array(top_95)
# low_95 = np.array(low_95)
# med_50 = np.array(med_50)
##not working with 1 dec scatter...
###############################################################################
frac_vals = [2,4,10]
nn_plot_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
nn_plot_arr[ii][jj] = (ratio_info[jj][neigh_vals[ii]])
plot_frac_arr = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(nn_plot_arr))]
for jj in range(len(nn_mass_dist)):
for hh in range(len(frac_vals)):
for ii in range(len(neigh_vals)):
plot_frac_arr[ii][hh][jj] = nn_plot_arr[ii][jj][frac_vals[hh]]
###############################################################################
###############################################################################
###############################################################################
eco_path = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density"
eco_path += r"\Catalogs\ECO_true"
eco_cols = np.array([0,1,2,4])
###############################################################################
###############################################################################
###############################################################################
ECO_true = (Index(eco_path,'.txt'))
names = ['ra','dec','cz','logMstar']
PD_eco = pd.read_csv(ECO_true[0],sep="\s+", usecols=(eco_cols),header=None,\
skiprows=1,names=names)
eco_comp = PD_eco[PD_eco.logMstar >= 9.1]
ra_eco = (np.array(eco_comp)).T[0]
dec_eco = (np.array(eco_comp)).T[1]
cz_eco = (np.array(eco_comp)).T[2]
mass_eco = (np.array(eco_comp)).T[3]
coords_eco = sph_to_cart(ra_eco,dec_eco,cz_eco)
eco_neighbor_tree = spatial.cKDTree(coords_eco)
eco_tree_dist = np.array(eco_neighbor_tree.query(coords_eco,\
(neigh_vals[-1]+1))[0])
eco_mass_dist = np.column_stack((mass_eco,eco_tree_dist.T[neigh_vals].T))
##range 1,7 because of the six nearest neighbors (and fact that 0 is mass)
##the jj is there to specify which index in the [1,6] array
eco_dens = ([calc_dens(neigh_vals[jj],\
(eco_mass_dist.T[range(1,7)[jj]])) for jj in range\
(len(neigh_vals))])
eco_mass_dens = [(np.column_stack((mass_eco,eco_dens[ii]))) for ii in \
range(len(neigh_vals))]
eco_idx = [(eco_mass_dens[jj].T[1].argsort()) for jj in \
range(len(neigh_vals))]
eco_mass_dat = [(eco_mass_dens[jj][eco_idx[jj]].T[0]) for jj in \
range(len(neigh_vals))]
eco_ratio_info = [[] for xx in xrange(len(eco_mass_dat))]
for qq in range(len(eco_mass_dat)):
bin_centers, eco_freq, eco_ratio_info[qq] = plot_calcs(eco_mass_dat[qq],\
bins,dlogM,mass_err=True,ratio_err=True)
eco_medians = [[] for xx in xrange(len(eco_mass_dat))]
for jj in (range(len(eco_mass_dat))):
eco_medians[jj] = np.array(bin_func(eco_mass_dist,bins,(jj+1),\
bootstrap=True))
###############################################################################
###############################################################################
fig,ax = plt.subplots(figsize=(8,8))
ax.set_title('Mass Distribution',fontsize=18)
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
ax.set_ylabel(r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$',fontsize=20)
ax.set_yscale('log')
ax.set_xlim(9.1,11.9)
ax.tick_params(axis='both', labelsize=14)
for ii in range(len(mass_freq)):
ax.plot(bin_centers,mass_freq[ii],color='silver')
ax.fill_between(bin_centers,max_lim,min_lim,color='silver',alpha=0.1)
ax.errorbar(bin_centers,eco_freq[0],yerr=eco_freq[1],color='dodgerblue',\
linewidth=2,label='ECO')
ax.legend(loc='best')
plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.94,\
hspace=0.2,wspace=0.2)
plt.show()
###############################################################################
A = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
zz_num = len(plot_frac_arr[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(zz_num):
zz_arr = np.array(plot_frac_arr[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(zz_arr)
if cc == 0:
zz_tot = np.zeros((n_elem,1))
zz_tot = np.insert(zz_tot,len(zz_tot.T),zz_arr,1)
zz_tot = np.array(np.delete(zz_tot,0,axis=1))
for kk in xrange(len(zz_tot)):
zz_tot[kk][zz_tot[kk] == np.inf] = np.nan
zz_tot_max = [np.nanmax(zz_tot[kk]) for kk in xrange(len(zz_tot))]
zz_tot_min = [np.nanmin(zz_tot[kk]) for kk in xrange(len(zz_tot))]
A[bin_str] = [zz_tot_max,zz_tot_min]
###############################################################################
np.seterr(divide='ignore',invalid='ignore')
nrow_num = int(6)
ncol_num = int(3)
zz = int(0)
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
figsize=(100,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'High Density Counts/Lower Density Counts', ha='center', \
va='center',rotation='vertical',fontsize=20)
# fig.suptitle("Percentile Trends", fontsize=18)
while zz <= 16:
for ii in range(len(eco_ratio_info)):
for hh in range(len(eco_ratio_info[0][1])):
for jj in range(len(nn_mass_dist)):
# upper = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][0]
# lower = A['{0}_{1}'.format(neigh_vals[ii],frac_vals[hh])][1]
# plot_bands(bin_centers,upper,lower,axes_flat[zz] )
plot_all_rats(bin_centers,(plot_frac_arr[ii][hh][jj]),\
neigh_vals[ii],axes_flat[zz],hh,zz)
plot_eco_rats(bin_centers,(eco_ratio_info[ii]),neigh_vals[ii],\
axes_flat[zz],hh,zz)
zz += 1
plt.subplots_adjust(left=0.04, bottom=0.09, right=0.98, top=0.98,\
hspace=0,wspace=0)
plt.show()
###############################################################################
B = {}
yy_num = len(med_plot_arr[0])
for nn in range(len(med_plot_arr)):
for ii in range(yy_num):
med_str = '{0}'.format(nn)
yy_arr = med_plot_arr[nn][ii]
n_y_elem = len(yy_arr)
if ii == 0:
yy_tot = np.zeros((n_y_elem,1))
yy_tot = np.insert(yy_tot,len(yy_tot.T),yy_arr,1)
yy_tot = np.array(np.delete(yy_tot,0,axis=1))
yy_tot_max = [np.nanmax(yy_tot[kk]) for kk in xrange(len(yy_tot))]
yy_tot_min = [np.nanmin(yy_tot[kk]) for kk in xrange(len(yy_tot))]
B[med_str] = [yy_tot_max,yy_tot_min]
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, 'Distance to Nth Neighbor (Mpc)', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=4:
for ii in range(len(med_plot_arr)):
for vv in range(len(nn_mass_dist)):
# lower_m = B['{0}'.format(ii)][0]
# upper_m = B['{0}'.format(ii)][1]
# plot_med_range(bin_centers,top_95[ii][vv],low_95[ii][vv],\
# axes_flat[zz],0.05,color='lightsteelblue')
# plot_med_range(bin_centers,top_68[ii][vv],low_68[ii][vv],\
# axes_flat[zz],0.15,color='gainsboro')
# plot_bands(bin_centers,upper_m,lower_m,axes_flat[zz])
plot_all_meds(bin_centers,med_plot_arr[ii][vv],axes_flat[zz],\
zz)
plot_eco_meds(bin_centers,eco_medians[ii][0],\
eco_medians[ii][1],eco_medians[ii][2],\
axes_flat[zz],zz)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=0.98, top=0.98,\
hspace=0,wspace=0)
plt.show()
###############################################################################
hist_low_info = {}
hist_high_info = {}
for ii in xrange(len(coords_test)):
hist_low_info[ii] = {}
hist_high_info[ii] = {}
for jj in range(len(neigh_vals)):
hist_low_info[ii][neigh_vals[jj]],hist_high_info[ii][neigh_vals[jj]] \
= hist_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM)
frac_vals = [2,4,10]
hist_low_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
hist_high_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in \
xrange(len(neigh_vals))]
for ii in range(len(neigh_vals)):
for jj in range(len(nn_mass_dist)):
hist_low_arr[ii][jj] = (hist_low_info[jj][neigh_vals[ii]])
hist_high_arr[ii][jj] = (hist_high_info[jj][neigh_vals[ii]])
plot_low_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(hist_low_arr))]
plot_high_hist = [[[[] for yy in xrange(len(nn_mass_dist))] \
for zz in xrange(len(frac_vals))] for xx in \
xrange(len(hist_high_arr))]
for jj in range(len(nn_mass_dist)):
for hh in range(len(frac_vals)):
for ii in range(len(neigh_vals)):
plot_low_hist[ii][hh][jj] = hist_low_arr[ii][jj][frac_vals[hh]]
plot_high_hist[ii][hh][jj] = hist_high_arr[ii][jj][frac_vals[hh]]
###############################################################################
C = {}
D = {}
nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5}
coln_dict = {2:0,4:1,10:2}
nn_keys = np.sort(nn_dict.keys())
col_keys = np.sort(coln_dict.keys())
vv_num = len(plot_low_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(vv_num):
vv_arr = np.array(plot_low_hist[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(vv_arr)
if cc == 0:
vv_tot = np.zeros((n_elem,1))
vv_tot = np.insert(vv_tot,len(vv_tot.T),vv_arr,1)
vv_tot = np.array(np.delete(vv_tot,0,axis=1))
for kk in xrange(len(vv_tot)):
vv_tot[kk][vv_tot[kk] == np.inf] = np.nan
vv_tot_max = [np.nanmax(vv_tot[kk]) for kk in xrange(len(vv_tot))]
vv_tot_min = [np.nanmin(vv_tot[kk]) for kk in xrange(len(vv_tot))]
C[bin_str] = [vv_tot_max,vv_tot_min]
hh_num = len(plot_high_hist[nn_dict[1]][coln_dict[2]])
for nn in nn_keys:
for coln in col_keys:
bin_str = '{0}_{1}'.format(nn,coln)
for cc in range(hh_num):
hh_arr = np.array(plot_high_hist[nn_dict[nn]][coln_dict[coln]][cc])
n_elem = len(hh_arr)
if cc == 0:
hh_tot = np.zeros((n_elem,1))
hh_tot = np.insert(hh_tot,len(hh_tot.T),hh_arr,1)
hh_tot = np.array(np.delete(hh_tot,0,axis=1))
for kk in xrange(len(hh_tot)):
hh_tot[kk][hh_tot[kk] == np.inf] = np.nan
hh_tot_max = [np.nanmax(hh_tot[kk]) for kk in xrange(len(hh_tot))]
hh_tot_min = [np.nanmin(hh_tot[kk]) for kk in xrange(len(hh_tot))]
D[bin_str] = [hh_tot_max,hh_tot_min]
###############################################################################
nrow_num = int(6)
ncol_num = int(3)
fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num, \
figsize=(150,200), sharex= True,sharey=True)
axes_flat = axes.flatten()
fig.text(0.02, 0.5,r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$', ha='center',
va='center',rotation='vertical',fontsize=20)
for ii in range(len(mass_dat)):
zz = 0
for jj in range(len(neigh_vals)):
for hh in range(3):
# upper = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
# lower = C['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
# upper_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][0]
# lower_2 = D['{0}_{1}'.format(neigh_vals[jj],frac_vals[hh])][1]
# plot_bands(bin_centers,upper,lower,axes_flat[zz])
# plot_bands(bin_centers,upper_2,lower_2,axes_flat[zz])
plot_hists(mass_dat[ii][neigh_vals[jj]],neigh_vals[jj],bins,dlogM,\
axes_flat[zz], hh, zz)
if ii == 0:
plot_eco_hists(eco_mass_dat[jj],bins,dlogM,\
axes_flat[zz],hh,zz)
zz += int(1)
plt.subplots_adjust(left=0.07, bottom=0.09, right=0.98, top=0.98,\
hspace=0, wspace=0)
plt.show()
###############################################################################
def schechter_log_func(stellar_mass,phi_star,alpha,m_star):
"""
Returns a plottable Schechter function for the
stellar mass functions of galaxies
Parameters
----------
stellar_mass: array-like
An array of unlogged stellar mass values which
will eventually be the x-axis values the function
is plotted against
phi_star: float-like
A constant which normalizes (?) the function;
Moves the graph up and down
alpha: negative integer-like
The faint-end, or in this case, low-mass slope;
Describes the power-law portion of the curve
m_star: float-like
Unlogged value of the characteristic (?) stellar
mass; the "knee" of the function, where the
power-law gives way to the exponential portion
Returns
-------
res: array-like
        Array of values prepared to be plotted on a log
scale to display the Schechter function
"""
constant = np.log(10) * phi_star
log_M_Mstar = np.log10(stellar_mass/m_star)
res = constant * 10**(log_M_Mstar * (alpha+1)) * \
np.exp(-10**log_M_Mstar)
return res
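# Sanity check at the knee: for stellar_mass == m_star the log ratio vanishes,
# so the function collapses to ln(10) * phi_star * exp(-1) ~= 0.847 * phi_star,
# independent of alpha (the parameter values below are illustrative).
_schech_knee = schechter_log_func(10**10.64, 1.0, -1.05, 10**10.64)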
###############################################################################
xdata = 10**bin_centers
p0 = (1,-1.05,10**10.64)
param_arr = [[] for ii in range(len(mass_freq)+1)]
fig,axes = plt.subplots(nrows=3,ncols=3,sharex=True,sharey=True,\
figsize=(150,200))
axes_flat = axes.flatten()
##rather than having mass_freq go out of index, just used if statement and
##directed it to the eco info
for ii in range(len(mass_freq)+1):
if ii == range(len(mass_freq)+1)[-1]:
ydata = eco_freq[0]
opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
p0=p0,sigma=eco_freq[1])
else:
ydata = (mass_freq[ii])
opt_v, est_cov = optimize.curve_fit(schechter_log_func,xdata,ydata,\
p0=p0)
schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
opt_v[2])
param_arr[ii] = opt_v
param_arr = np.array(param_arr)
ax = axes_flat[ii]
ax.set_yscale('log')
ax.set_ylim([10**-3,10**0])
ax.set_xlim([9.1,11.9])
ax.set_yticks([10**-2,10**-1,10**0])
ax.plot(bin_centers,schech_vals,label='Schechter',color='silver')
if ii == 8:
ax.errorbar(bin_centers,ydata,yerr=eco_freq[1],color='dodgerblue',\
label='ECO')
else:
ax.plot(bin_centers,ydata,label='Mock',color='darkorchid')
if ii == 0 or ii == 8:
ax.legend(loc='best')
if ii == 7:
ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18)
plt.subplots_adjust(left=0.03, bottom=0.08, right=0.99, top=0.99,\
hspace=0,wspace=0)
plt.show()
###############################################################################
eco_low = {}
eco_high = {}
for jj in range(len(neigh_vals)):
eco_low[neigh_vals[jj]] = {}
eco_high[neigh_vals[jj]] = {}
eco_low[neigh_vals[jj]], eco_high[neigh_vals[jj]] = hist_calcs\
(eco_mass_dat[jj],bins,dlogM,eco=True)
###############################################################################
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# plt.show()
###############################################################################
def param_finder(hist_counts,bin_centers):
"""
Parameters
----------
    hist_counts: array-like
An array with stellar mass function values which will be used in the
Schechter function parameterization
bin_centers: array-like
An array with the same number of values as hist_counts; used as
independent variable in Schechter function
Returns
-------
opt_v: array-like
Array with three values: phi_star, alpha, and M_star
res_arr: array-like
Array with two values: alpha and log_M_star
"""
xdata = 10**bin_centers
p0 = (1,-1.05,10**10.64)
opt_v,est_cov = optimize.curve_fit(schechter_log_func,xdata,\
hist_counts,p0=p0)
alpha = opt_v[1]
log_m_star = np.log10(opt_v[2])
res_arr = np.array([alpha,log_m_star])
return opt_v, res_arr
###############################################################################
###Test that param_finder is working
# opt_v,test_arr = param_finder(eco_low[1][2],bin_centers)
# schech_vals = schechter_log_func(10**bin_centers,opt_v[0],opt_v[1],\
# opt_v[2])
# ####THE error isn't working. Stops after 800 iterations.
# # opt_v,est_v = optimize.curve_fit(schechter_log_func,10**bin_centers,
# # eco_low[1][2],p0 = (1,-1.05,10**10.64),sigma=eco_low[1]['low_err'][0],\
# # absolute_sigma=True)
# fig,ax = plt.subplots()
# ax.set_yscale('log')
# ax.plot(bin_centers,eco_low[1][2])
# ax.plot(bin_centers,schech_vals)
# plt.show()
###############################################################################
def perc_calcs(mass,bins,dlogM):
mass_counts, edges = np.histogram(mass,bins)
mass_freq = mass_counts/float(len(mass))/dlogM
return mass_freq
###############################################################################
def deciles(mass):
dec_val = int(len(mass)/10)
res_list = [[] for bb in range(10)]
for aa in range(0,10):
if aa == 9:
res_list[aa] = mass[aa*dec_val:]
else:
res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val]
return res_list
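# Small slicing example: deciles(np.arange(25)) returns ten pieces, the first
# nine of length int(25/10) = 2 and a final piece of length 7 that absorbs the
# remainder.
_dec_check = deciles(np.arange(25))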
###############################################################################
eco_dec = {}
for cc in range(len(eco_mass_dat)):
eco_dec[neigh_vals[cc]] = deciles(eco_mass_dat[cc])
# for ii in range(len(eco_dec[1])):
# print len(eco_dec[1][ii])
eco_dec_smf = {}
for ss in neigh_vals:
eco_dec_smf[ss] = {}
for tt in range(len(eco_dec[ss])):
eco_dec_smf[ss][tt] = perc_calcs(eco_dec[ss][tt],bins,dlogM)
eco_dec_alpha = {}
eco_dec_logMstar = {}
for oo in neigh_vals:
eco_dec_alpha[oo] = []
eco_dec_logMstar[oo] = []
for pp in range(len(eco_dec[oo])):
opt_v, temp_res_arr = param_finder(eco_dec_smf[oo][pp],bin_centers)
eco_dec_alpha[oo].append(temp_res_arr[0])
eco_dec_logMstar[oo].append(temp_res_arr[1])
ten_x = range(1,11)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(ten_x,eco_dec_alpha[ii])
# ax.set_xlim(0,11)
# plt.show()
###############################################################################
def plot_deciles(dec_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
color='gray'):
if eco == True:
titles = [1,2,3,5,10,20]
ax.set_xlim(0,11)
if logMstar == True:
ax.set_ylim(10,12)
ax.set_yticks(np.arange(10,12,0.5))
else:
ax.set_ylim(-1.25,-1.)
ax.set_yticks(np.arange(-1.25,-1.,0.05))
ax.set_xticks(range(1,11))
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('Decile',fontsize=18)
if eco == True:
ax.plot(dec_num,y_vals,marker='o',color=color,linewidth=2.5,\
markeredgecolor=color)
else:
ax.plot(dec_num,y_vals,color=color,alpha=0.5)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
def quartiles(mass):
    """Split `mass` into 4 consecutive, nearly equal-sized chunks (any
    remainder is folded into the last chunk)."""
dec_val = int(len(mass)/4)
res_list = [[] for bb in range(4)]
for aa in range(0,4):
if aa == 3:
res_list[aa] = mass[aa*dec_val:]
else:
res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val]
return res_list
###############################################################################
eco_quarts = {}
for cc in range(len(eco_mass_dat)):
eco_quarts[neigh_vals[cc]] = quartiles(eco_mass_dat[cc])
eco_quarts_smf = {}
for ss in neigh_vals:
eco_quarts_smf[ss] = {}
for tt in range(len(eco_quarts[ss])):
eco_quarts_smf[ss][tt] = perc_calcs(eco_quarts[ss][tt],bins,dlogM)
eco_quarts_alpha = {}
eco_quarts_logMstar = {}
for oo in neigh_vals:
eco_quarts_alpha[oo] = []
eco_quarts_logMstar[oo] = []
for pp in range(len(eco_quarts[oo])):
opt_v, temp_res_arr = param_finder(eco_quarts_smf[oo][pp],bin_centers)
eco_quarts_alpha[oo].append(temp_res_arr[0])
eco_quarts_logMstar[oo].append(temp_res_arr[1])
quart_x = range(1,5)
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(quart_x,eco_quarts_alpha[ii])
# ax.set_xlim(0,5)
# plt.show()
###############################################################################
def plot_quartiles(quart_num,y_vals,ax,plot_idx,eco=False,logMstar=False,\
color='gray'):
if eco == True:
titles = [1,2,3,5,10,20]
ax.set_xlim(0,5)
if logMstar == True:
ax.set_ylim(10,12)
ax.set_yticks(np.arange(10,12,0.5))
else:
ax.set_ylim(-1.2,-1.)
ax.set_yticks(np.arange(-1.2,-1.,0.04))
ax.set_xticks(range(1,5))
title_here = 'n = {0}'.format(titles[plot_idx])
ax.text(0.05, 0.95, title_here,horizontalalignment='left',\
verticalalignment='top',transform=ax.transAxes,fontsize=18)
if plot_idx == 4:
ax.set_xlabel('Quartiles',fontsize=18)
if eco == True:
ax.plot(quart_num,y_vals,marker='o',color=color,linewidth=2,\
markeredgecolor=color)
else:
ax.plot(quart_num,y_vals,color=color)
###############################################################################
# nrow_num_mass = int(2)
# ncol_num_mass = int(3)
# fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
# figsize=(100,200), sharex= True, sharey = True)
# axes_flat = axes.flatten()
# zz = int(0)
# while zz <=5:
# ii = neigh_vals[zz]
# plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,eco=True,\
# logMstar=True)
# zz += 1
# plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
# hspace=0,wspace=0)
# plt.show()
###############################################################################
def quart_finder(mass,bins,dlogM,neigh_vals):
quarts = {}
for ii in neigh_vals:
quarts[ii] = quartiles(mass[ii])
quarts_smf = {}
for ss in neigh_vals:
quarts_smf[ss] = {}
for tt in range(len(quarts[ss])):
quarts_smf[ss][tt] = perc_calcs(quarts[ss][tt],bins,dlogM)
quarts_alpha = {}
quarts_logMstar = {}
for oo in neigh_vals:
quarts_alpha[oo] = []
quarts_logMstar[oo] = []
for pp in range(len(quarts[oo])):
opt_v, temp_res_arr = param_finder(quarts_smf[oo][pp],bin_centers)
quarts_alpha[oo].append(temp_res_arr[0])
quarts_logMstar[oo].append(temp_res_arr[1])
return quarts_alpha, quarts_logMstar
###############################################################################
mock_quarts_alpha_dict = {}
mock_quarts_logMstar_dict = {}
for jj in range(len(mass_dat)):
mock_quarts_alpha_dict[jj], mock_quarts_logMstar_dict[jj] = quart_finder\
(mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
def dec_finder(mass,bins,dlogM,neigh_vals):
decs = {}
for ii in neigh_vals:
decs[ii] = deciles(mass[ii])
decs_smf = {}
for ss in neigh_vals:
decs_smf[ss] = {}
for tt in range(len(decs[ss])):
decs_smf[ss][tt] = perc_calcs(decs[ss][tt],bins,dlogM)
decs_alpha = {}
decs_logMstar = {}
for oo in neigh_vals:
decs_alpha[oo] = []
decs_logMstar[oo] = []
for pp in range(len(decs[oo])):
opt_v, temp_res_arr = param_finder(decs_smf[oo][pp],bin_centers)
decs_alpha[oo].append(temp_res_arr[0])
decs_logMstar[oo].append(temp_res_arr[1])
return decs_alpha, decs_logMstar
###############################################################################
mock_dec_alpha_dict = {}
mock_dec_logMstar_dict = {}
for jj in range(len(mass_dat)):
mock_dec_alpha_dict[jj], mock_dec_logMstar_dict[jj] = dec_finder\
(mass_dat[jj],bins,dlogM,neigh_vals)
###############################################################################
###quartiles logMstar
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_quartiles(quart_x,mock_quarts_logMstar_dict[ff][ii],axes_flat[zz],\
zz,logMstar=True)
plot_quartiles(quart_x,eco_quarts_logMstar[ii],axes_flat[zz],zz,\
logMstar=True,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, '$\log\ (M_{*}/M_{\odot})$', ha='center', \
va='center',rotation='vertical',fontsize=20)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_deciles(ten_x,mock_dec_logMstar_dict[ff][ii],axes_flat[zz],\
zz,logMstar=True)
plot_deciles(ten_x,eco_dec_logMstar[ii],axes_flat[zz],zz,\
logMstar=True,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_deciles(ten_x,mock_dec_alpha_dict[ff][ii],axes_flat[zz],zz,\
logMstar=False)
plot_deciles(ten_x,eco_dec_alpha[ii],axes_flat[zz],zz,\
logMstar=False,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
nrow_num_mass = int(2)
ncol_num_mass = int(3)
fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \
figsize=(100,200), sharex= True, sharey = True)
axes_flat = axes.flatten()
fig.text(0.01, 0.5, r'$\alpha$', ha='center', \
va='center',rotation='vertical',fontsize=25)
zz = int(0)
while zz <=5:
ii = neigh_vals[zz]
for ff in range(len(mass_dat)):
plot_quartiles(quart_x,mock_quarts_alpha_dict[ff][ii],axes_flat[zz],zz,\
logMstar=False)
plot_quartiles(quart_x,eco_quarts_alpha[ii],axes_flat[zz],zz,\
logMstar=False,color='crimson',eco=True)
zz += 1
plt.subplots_adjust(left=0.05, bottom=0.09, right=1.00, top=1.00,\
hspace=0,wspace=0)
plt.show()
###############################################################################
##Creating dictionaries through for loops to house the parameters for each of
#ECO's 18 different options (6 nn and 3 density cuts)
#One dictionary for the lower portion of the cuts and one for the higher
# param_dict_low = {}
# param_dict_high = {}
# for dd in neigh_vals:
# param_dict_low[dd] = {}
# param_dict_high[dd] = {}
# for ee in frac_vals:
# param_dict_low[dd][ee] = {}
# param_dict_high[dd][ee] = {}
# opt_v, param_dict_low[dd][ee] = param_finder(eco_low[dd][ee],\
# bin_centers)
# opt_v, param_dict_high[dd][ee] = param_finder(eco_high[dd][ee],\
# bin_centers)
# #### Putting the percentile cuts in order, as seen below
# #10,25,low_50,high_50,75,90
# over_alpha_dict = {}
# over_log_m_star = {}
# for dd in neigh_vals:
# temp_list_alpha = []
# temp_list_logMstar = []
# over_alpha_dict[dd] = {}
# over_log_m_star[dd] = {}
# low_idx = np.array(list(reversed(np.sort(param_dict_low[dd].keys()))))
# high_idx = np.sort(param_dict_high[dd].keys())
# for ff in range(len(low_idx)):
# temp_list_alpha.append(param_dict_low[dd][low_idx[ff]][0])
# temp_list_logMstar.append(param_dict_low[dd][low_idx[ff]][1])
# for ff in range(len(high_idx)):
# temp_list_alpha.append(param_dict_high[dd][high_idx[ff]][0])
# temp_list_logMstar.append(param_dict_high[dd][high_idx[ff]][1])
# over_alpha_dict[dd] = temp_list_alpha
# over_log_m_star[dd] = temp_list_logMstar
# perc_arr = (10,25,49,51,75,90)
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_log_m_star[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\log\ M_{*}$')
# plt.show()
# fig,ax = plt.subplots()
# for jj in neigh_vals:
# ax.plot(perc_arr,over_alpha_dict[jj],marker='o',label='{0}'.format(jj), \
# linestyle='--')
# ax.set_xlim([0,100])
# ax.legend(loc='best', numpoints=1)
# ax.set_xlabel('Percentile')
# ax.set_ylabel(r'$\alpha$')
# plt.show()
### moving around the parameters so that I can find the differences, rather
#than just plotting them straight-up
# diff_dict_m_star = {}
# diff_dict_alpha = {}
# for dd in neigh_vals:
# diff_dict_m_star[dd] = {}
# diff_dict_alpha[dd] = {}
# for jj in frac_vals:
# temp_list_diff_m_star = []
# temp_list_diff_alpha = []
# diff_dict_alpha[dd][jj] = {}
# diff_dict_m_star[dd][jj] = {}
# temp_list_diff_m_star.append((param_dict_high[dd][jj][1] - \
# param_dict_low[dd][jj][1]))
# temp_list_diff_alpha.append(((param_dict_high[dd][jj][0]-\
# param_dict_low[dd][jj][0])/param_dict_high[dd][jj][0] * 100))
# diff_dict_alpha[dd][jj] = np.array(temp_list_diff_alpha)
# diff_dict_m_star[dd][jj] = np.array(temp_list_diff_m_star)
# dict_revamp_mstar = {}
# for dd in neigh_vals:
# dict_revamp_mstar[dd] = []
# for jj in frac_vals:
# dict_revamp_mstar[dd].append(diff_dict_m_star[dd][jj])
# dict_revamp_alpha = {}
# for dd in neigh_vals:
# dict_revamp_alpha[dd] = []
# for jj in frac_vals:
# dict_revamp_alpha[dd].append(diff_dict_alpha[dd][jj])
# discrete_x = np.array([1,2,3])
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_mstar[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel('Difference in $\log\ M_{*}$, h-l',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,0.5,'50/50 Cut',horizontalalignment='center')
# ax.text(2,0.6,'25/75 Cut',horizontalalignment='center')
# ax.text(3,0.75,'10/90 Cut',horizontalalignment='center')
# plt.show()
# ######
# fig,ax = plt.subplots()
# for ii in neigh_vals:
# ax.plot(discrete_x,dict_revamp_alpha[ii],marker='o',\
# linestyle= '--',label='{0}'.format(ii))
# ax.set_xlim(0,4)
# ax.set_xlabel('Fractional Cut',fontsize=18)
# ax.set_xticks([1,2,3])
# ax.set_ylabel(r'Difference in $\alpha$, (h-l)/h',fontsize=18)
# ax.legend(loc='best',numpoints=1)
# ax.text(1,-7,'50/50 Cut',horizontalalignment='center')
# ax.text(2,-7,'25/75 Cut',horizontalalignment='center')
# ax.text(3,-7,'10/90 Cut',horizontalalignment='center')
# plt.show()
#50/50, 25/75, 10/90
# mocks_high_alpha = {}
# mocks_high_logMstar = {}
# mocks_low_alpha = {}
# mocks_low_logMstar = {}
# for rr in xrange(len(hist_high_info)):
# mocks_high_alpha[rr] = {}
# mocks_high_logMstar[rr] = {}
# mocks_low_alpha[rr] = {}
# mocks_low_logMstar[rr] = {}
# for ss in neigh_vals:
# mocks_high_alpha[rr][ss] = {}
# mocks_high_logMstar[rr][ss] = {}
# mocks_low_alpha[rr][ss] = {}
# mocks_low_logMstar[rr][ss] = {}
# for tt in frac_vals:
# opt_v, temp_res_high = param_finder(hist_high_info[rr][ss][tt],\
# bin_centers)
# opt_v, temp_res_low = param_finder(hist_low_info[rr][ss][tt],\
# bin_centers)
# mocks_high_alpha[rr][ss][tt] = temp_res_high[0]
# mocks_high_logMstar[rr][ss][tt] = temp_res_high[1]
# mocks_low_alpha[rr][ss][tt] = temp_res_low[0]
# mocks_low_logMstar[rr][ss][tt] = temp_res_low[1]
|
mit
|
beepee14/scikit-learn
|
sklearn/tree/export.py
|
78
|
15814
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
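# Illustrative note (not part of the scikit-learn API): _color_brew spaces the
# hues 360/n degrees apart starting at 25 degrees, so _color_brew(4) uses hues
# 25, 115, 205 and 295 and returns four [R, G, B] lists with 0-255 components.
# A quick sanity check could look like:
#   palette = _color_brew(4)
#   assert len(palette) == 4 and all(len(rgb) == 3 for rgb in palette)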
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
|
bsd-3-clause
|
lkloh/aimbat-lite
|
scripts/egplot.py
|
1
|
1177
|
#!/usr/bin/env python
"""
Example python script for SAC plotting replication: p1, p2, prs.
Xiaoting Lou ([email protected])
03/07/2012
"""
from pylab import *
import matplotlib.transforms as transforms
from pysmo.aimbat.sacpickle import loadData
from pysmo.aimbat.plotphase import getDataOpts, PPConfig, sacp1, sacp2, sacprs
# figure axes
fig = figure(figsize=(9,12))
rectp2 = [.09, .050, .8, .15]
rectp1 = [.09, .245, .8, .33]
rectp0 = [.09, .620, .8, .36]
axp2 = fig.add_axes(rectp2)
axp1 = fig.add_axes(rectp1)
axp0 = fig.add_axes(rectp0)
# read data and plot
gsac, opts = getDataOpts()
# prs
opts.ynorm = .95
saclist = gsac.saclist
prs = sacprs(saclist, opts, axp0)
# p1
opts.ynorm = 1.7
p1 = sacp1(saclist, opts, axp1)
# p2
opts.reltime = 0
p2 = sacp2(saclist, opts, axp2)
# set x limits
axp0.set_xlim(625, 762)
axp1.set_xlim(625, 762)
axp2.set_xlim(-45, 65)
# numbering
axs = [axp0, axp1, axp2]
labs = 'abc'
for ax, lab in zip(axs, labs):
tt = '(' + lab + ')'
trans = transforms.blended_transform_factory(ax.transAxes, ax.transAxes)
ax.text(-.05, 1, tt, transform=trans, va='center', ha='right', size=16)
fig.savefig('egplot.pdf', format='pdf')
show()
|
gpl-3.0
|
f3r/scikit-learn
|
benchmarks/bench_plot_neighbors.py
|
287
|
6433
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
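# Illustration of get_data() (assumes scikit-learn's digits dataset is
# available; the shapes follow directly from the code above):
#   get_data(500, 16, 'dense').shape   # -> (500, 16), uniform random features
#   get_data(500, 16, 'digits').shape  # -> (500, 16), first 500 digit images
#                                      #    with columns reordered by the
#                                      #    descending values of the first row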
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)  # fiducial D (was DD, left over from the previous loop)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
|
bsd-3-clause
|
JesseLivezey/pylearn2
|
pylearn2/cross_validation/dataset_iterators.py
|
14
|
19423
|
"""
Cross-validation dataset iterators.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
import numpy as np
import warnings
try:
from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit,
StratifiedShuffleSplit)
except ImportError:
warnings.warn("Could not import from sklearn.")
from pylearn2.compat import OrderedDict
from pylearn2.cross_validation.blocks import StackedBlocksCV
from pylearn2.cross_validation.subset_iterators import (
ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit,
StratifiedValidationShuffleSplit)
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
class DatasetCV(object):
"""
Construct a new DenseDesignMatrix for each subset.
Parameters
----------
dataset : object
Full dataset for use in cross validation.
subset_iterator : iterable
Iterable that returns (train, test) or (train, valid, test) indices
for partitioning the dataset during cross-validation.
preprocessor : Preprocessor or None
Preprocessor to apply to child datasets.
fit_preprocessor : bool
Whether preprocessor can fit parameters when applied to training
data.
which_set : str, list or None
If None, return all subset datasets. If one or more of 'train',
'valid', or 'test', return only the dataset(s) corresponding to the
given subset(s).
return_dict : bool
Whether to return subset datasets as a dictionary. If True,
returns a dict with keys 'train', 'valid', and/or 'test' (if
subset_iterator returns two subsets per partition, 'train' and
'test' are used, and if subset_iterator returns three subsets per
partition, 'train', 'valid', and 'test' are used). If False,
returns a list of datasets matching the subset order given by
subset_iterator.
"""
def __init__(self, dataset, subset_iterator, preprocessor=None,
fit_preprocessor=False, which_set=None, return_dict=True):
self.dataset = dataset
self.subset_iterator = list(subset_iterator) # allow generator reuse
dataset_iterator = dataset.iterator(mode='sequential', num_batches=1,
data_specs=dataset.data_specs,
return_tuple=True)
self._data = dataset_iterator.next()
self.preprocessor = preprocessor
self.fit_preprocessor = fit_preprocessor
self.which_set = which_set
if which_set is not None:
which_set = np.atleast_1d(which_set)
assert len(which_set)
for label in which_set:
if label not in ['train', 'valid', 'test']:
raise ValueError("Unrecognized subset '{}'".format(label))
self.which_set = which_set
self.return_dict = return_dict
def get_data_subsets(self):
"""
Partition the dataset according to cross-validation subsets and
return the raw data in each subset.
"""
for subsets in self.subset_iterator:
labels = None
if len(subsets) == 3:
labels = ['train', 'valid', 'test']
elif len(subsets) == 2:
labels = ['train', 'test']
# data_subsets is an OrderedDict to maintain label order
data_subsets = OrderedDict()
for i, subset in enumerate(subsets):
subset_data = tuple(data[subset] for data in self._data)
if len(subset_data) == 2:
X, y = subset_data
else:
X, = subset_data
y = None
data_subsets[labels[i]] = (X, y)
yield data_subsets
def __iter__(self):
"""
Create a DenseDesignMatrix for each dataset subset and apply any
preprocessing to the child datasets.
"""
for data_subsets in self.get_data_subsets():
datasets = {}
for label, data in data_subsets.items():
X, y = data
datasets[label] = DenseDesignMatrix(X=X, y=y)
# preprocessing
if self.preprocessor is not None:
self.preprocessor.apply(datasets['train'],
can_fit=self.fit_preprocessor)
for label, dataset in datasets.items():
if label == 'train':
continue
self.preprocessor.apply(dataset, can_fit=False)
# which_set
if self.which_set is not None:
for label, dataset in list(datasets.items()):
if label not in self.which_set:
del datasets[label]
del data_subsets[label]
if not len(datasets):
raise ValueError("No matching dataset(s) for " +
"{}".format(self.which_set))
if not self.return_dict:
# data_subsets is an OrderedDict to maintain label order
datasets = list(datasets[label]
for label in data_subsets.keys())
if len(datasets) == 1:
datasets, = datasets
yield datasets
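    # A hedged usage sketch (assumptions: `full_dataset` is a DenseDesignMatrix
    # with X and y already built, and sklearn's KFold imported as above):
    #
    #   cv = KFold(full_dataset.X.shape[0], n_folds=5)
    #   for fold in DatasetCV(full_dataset, cv):
    #       train, test = fold['train'], fold['test']
    #       # each value is a DenseDesignMatrix restricted to one CV subset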
class StratifiedDatasetCV(DatasetCV):
"""
Subclass of DatasetCV for stratified experiments, where
the relative class proportions of the full dataset are maintained in
each partition.
Parameters
----------
dataset : object
Dataset to use in cross validation.
subset_iterator : iterable
Iterable that returns train/test or train/valid/test splits for
partitioning the dataset during cross-validation.
preprocessor : Preprocessor or None
Preprocessor to apply to child datasets.
fit_preprocessor : bool
Whether preprocessor can fit parameters when applied to training
data.
which_set : str, list or None
If None, return all subset datasets. If one or more of 'train',
'valid', or 'test', return only the dataset(s) corresponding to the
given subset(s).
return_dict : bool
Whether to return subset datasets as a dictionary. If True,
returns a dict with keys 'train', 'valid', and/or 'test' (if
subset_iterator returns two subsets per partition, 'train' and
'test' are used, and if subset_iterator returns three subsets per
partition, 'train', 'valid', and 'test' are used). If False,
returns a list of datasets matching the subset order given by
subset_iterator.
"""
@staticmethod
def get_y(dataset):
"""
Stratified cross-validation requires label information for
examples. This function gets target values for a dataset,
converting from one-hot encoding to a 1D array as needed.
Parameters
----------
dataset : object
Dataset containing target values for examples.
"""
y = np.asarray(dataset.y)
if y.ndim > 1:
assert np.array_equal(np.unique(y), [0, 1])
y = np.argmax(y, axis=1)
return y
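    # Small illustration of get_y (hypothetical values): a one-hot target
    # matrix such as np.array([[1, 0], [0, 1], [1, 0]]) is collapsed to the
    # 1-D label vector array([0, 1, 0]); targets that are already 1-D pass
    # through unchanged.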
class TransformerDatasetCV(object):
"""
Cross-validation with dataset transformations. This class returns
dataset subsets after transforming them with one or more pretrained
models.
Parameters
----------
dataset_iterator : DatasetCV
Cross-validation dataset iterator providing train/test or
train/valid/test datasets.
transformers : Model or iterable
Transformer model(s) to use for transforming datasets.
"""
def __init__(self, dataset_iterator, transformers):
self.dataset_iterator = dataset_iterator
self.transformers = transformers
def __iter__(self):
"""
Construct a Transformer dataset for each partition.
"""
for k, datasets in enumerate(self.dataset_iterator):
if isinstance(self.transformers, list):
transformer = self.transformers[k]
elif isinstance(self.transformers, StackedBlocksCV):
transformer = self.transformers.select_fold(k)
else:
transformer = self.transformers
if isinstance(datasets, list):
for i, dataset in enumerate(datasets):
datasets[i] = TransformerDataset(dataset, transformer)
else:
for key, dataset in datasets.items():
datasets[key] = TransformerDataset(dataset, transformer)
yield datasets
class DatasetKFold(DatasetCV):
"""
K-fold cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds.
shuffle : bool
Whether to shuffle the dataset before partitioning.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
n = dataset.X.shape[0]
cv = KFold(n, n_folds=n_folds, shuffle=shuffle,
random_state=random_state)
super(DatasetKFold, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetKFold(StratifiedDatasetCV):
"""
Stratified K-fold cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds.
shuffle : bool
Whether to shuffle the dataset before partitioning.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
y = self.get_y(dataset)
try:
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=shuffle,
random_state=random_state)
except TypeError:
assert not shuffle and not random_state, (
"The 'shuffle' and 'random_state' arguments are not " +
"supported by this version of sklearn. See "
"http://scikit-learn.org/stable/developers/index.html" +
"#git-repo for details on installing the development version.")
cv = StratifiedKFold(y, n_folds=n_folds)
super(StratifiedDatasetKFold, self).__init__(dataset, cv, **kwargs)
class DatasetShuffleSplit(DatasetCV):
"""
Shuffle-split cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle-split iterations.
test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test set.
If int, interpreted as the absolute number of examples in the test
set. If None, adjusted to the complement of train_size.
train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
set. If int, interpreted as the absolute number of examples in the
training set. If None, adjusted to the complement of test_size.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
random_state=None, **kwargs):
n = dataset.X.shape[0]
cv = ShuffleSplit(n, n_iter=n_iter, test_size=test_size,
train_size=train_size, random_state=random_state)
super(DatasetShuffleSplit, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetShuffleSplit(StratifiedDatasetCV):
"""
Stratified shuffle-split cross-validation.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle-split iterations.
test_size : float, int, or None
        If float, interpreted as the proportion of examples in the test set.
If int, interpreted as the absolute number of examples in the test
set. If None, adjusted to the complement of train_size.
train_size : float, int, or None
        If float, interpreted as the proportion of examples in the training
set. If int, interpreted as the absolute number of examples in the
training set. If None, adjusted to the complement of test_size.
random_state : int or RandomState
Random number generator used for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
random_state=None, **kwargs):
y = self.get_y(dataset)
cv = StratifiedShuffleSplit(y, n_iter=n_iter, test_size=test_size,
train_size=train_size,
random_state=random_state)
super(StratifiedDatasetShuffleSplit, self).__init__(dataset, cv,
**kwargs)
class DatasetValidationKFold(DatasetCV):
"""
K-fold cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds. Must be at least 3.
shuffle : bool
Whether to shuffle the data before splitting.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
n = dataset.X.shape[0]
cv = ValidationKFold(n, n_folds, shuffle, random_state)
super(DatasetValidationKFold, self).__init__(dataset, cv, **kwargs)
class StratifiedDatasetValidationKFold(StratifiedDatasetCV):
"""
Stratified K-fold cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_folds : int
Number of cross-validation folds. Must be at least 3.
shuffle : bool
Whether to shuffle the data before splitting.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
**kwargs):
y = self.get_y(dataset)
cv = StratifiedValidationKFold(y, n_folds, shuffle, random_state)
super(StratifiedDatasetValidationKFold, self).__init__(dataset, cv,
**kwargs)
class DatasetValidationShuffleSplit(DatasetCV):
"""
Shuffle-split cross-validation with train/valid/test subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle/split iterations.
test_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to the complement
of train_size + valid_size.
valid_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to match
test_size.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to the complement
of valid_size + test_size.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
train_size=None, random_state=None, **kwargs):
n = dataset.X.shape[0]
cv = ValidationShuffleSplit(n, n_iter, test_size, valid_size,
train_size, random_state)
super(DatasetValidationShuffleSplit, self).__init__(dataset, cv,
**kwargs)
class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV):
"""
Stratified shuffle-split cross-validation with train/valid/test
subsets.
Parameters
----------
dataset : object
Dataset to use for cross-validation.
n_iter : int
Number of shuffle/split iterations.
test_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to the complement
of train_size + valid_size.
valid_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to match
test_size.
train_size : float, int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the entire dataset to include in the validation
split. If int, represents the absolute number of validation
samples. If None, the value is automatically set to the complement
of valid_size + test_size.
random_state : int, RandomState, or None
Pseudorandom number seed or generator to use for shuffling.
kwargs : dict
Keyword arguments for DatasetCV.
"""
def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
train_size=None, random_state=None, **kwargs):
y = self.get_y(dataset)
cv = StratifiedValidationShuffleSplit(y, n_iter, test_size, valid_size,
train_size, random_state)
super(StratifiedDatasetValidationShuffleSplit, self).__init__(dataset,
cv,
**kwargs)
|
bsd-3-clause
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/quandl/model/data_mixin.py
|
1
|
2084
|
import pandas as pd
from quandl.errors.quandl_error import ColumnNotFound
class DataMixin(object):
# DataFrame will respect order of input list of list
def to_pandas(self, keep_column_indexes=[]):
data = self.to_list()
# ensure pandas gets a list of lists
if data and isinstance(data, list) and not isinstance(data[0], list):
data = [data]
if 'columns' in self.meta.keys():
df = pd.DataFrame(data=data, columns=self.columns)
for index, type in enumerate(self.column_types):
if type == 'Date':
df[self.columns[index]] = df[self.columns[index]].apply(pd.to_datetime)
else:
df = pd.DataFrame(data=data, columns=self.column_names)
# ensure our first column of time series data is of pd.datetime
df[self.column_names[0]] = df[self.column_names[0]].apply(pd.to_datetime)
df.set_index(self.column_names[0], inplace=True)
# unfortunately to_records() cannot handle unicode in 2.7
df.index.name = str(df.index.name)
# keep_column_indexes are 0 based, 0 is the first column
if len(keep_column_indexes) > 0:
self._validate_col_index(df, keep_column_indexes)
# need to decrement all our indexes by 1 because
# Date is considered a column by our API, but in pandas,
# it is the index, so column 0 is the first column after Date index
keep_column_indexes = list([x - 1 for x in keep_column_indexes])
df = df.iloc[:, keep_column_indexes]
return df
def to_numpy(self):
return self.to_pandas().to_records()
def to_csv(self):
return self.to_pandas().to_csv()
def _validate_col_index(self, df, keep_column_indexes):
num_columns = len(df.columns)
for col_index in keep_column_indexes:
if col_index > num_columns or col_index < 1:
raise ColumnNotFound('Requested column index %s does not exist'
% col_index)
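# A hedged usage sketch (assumes `data` is a quandl Data/DataList object that
# mixes in DataMixin, e.g. the object returned by a dataset .data() call; the
# setup itself is hypothetical and depends on the installed quandl version):
#   df       = data.to_pandas(keep_column_indexes=[1, 4])  # keep only the 1st and 4th data columns
#   rec      = data.to_numpy()                              # same frame as a numpy recarray
#   csv_text = data.to_csv()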
|
mit
|
Mierzen/Dam-Simulation
|
modules/pumpingsystem.py
|
1
|
15638
|
import logging
import math
import os
import sys
import pandas as pd
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class PumpingLevel:
def __init__(self, name, capacity, initial_level, pump_flow, pump_power, pump_schedule_table, initial_pumps_status,
fissure_water_inflow, hysteresis=5.0, UL_LL=95.0, UL_HL=100.0,
fed_to_level=None, pump_statuses_for_validation=None,
n_mode_min_pumps=0, n_mode_max_pumps=3, n_mode_min_level=33, n_mode_max_level=77,
n_mode_control_range=5, n_mode_bottom_offset=3, n_mode_top_offset=3):
self.name = name
self.capacity = capacity
self.pump_flow = pump_flow
self.pump_power = pump_power
self.pump_schedule_table = pump_schedule_table
self.fissure_water_inflow = fissure_water_inflow
self.level_history = [initial_level]
self.pump_status_history = [initial_pumps_status]
self.fed_to_level = fed_to_level # to which level does this one pump?
self.last_outflow = 0
self.hysteresis = hysteresis
self.UL_LL = UL_LL
self.UL_HL = UL_HL
self.UL_100 = False
self.max_pumps = len([1 for r in pump_schedule_table if [150, 150, 150] not in r])
self.pump_statuses_for_validation = pump_statuses_for_validation # this is only used in validation mode
self.n_mode_min_level = n_mode_min_level
self.n_mode_max_level = n_mode_max_level
self.n_mode_min_pumps = n_mode_min_pumps
self.n_mode_max_pumps = n_mode_max_pumps
self.n_mode_control_range = n_mode_control_range
self.n_mode_bottom_offset = n_mode_bottom_offset
self.n_mode_top_offset = n_mode_top_offset
# calculate starting and stopping levels for n-factor mode
# 1 = peak, 2 = standard, 3 = off-peak
self.n_mode_lower_bound = {3: n_mode_min_level,
2: n_mode_min_level,
1: n_mode_max_level - n_mode_control_range}
self.n_mode_upper_bound = {3: n_mode_min_level + n_mode_control_range,
2: n_mode_min_level + n_mode_control_range,
1: n_mode_max_level}
self.n_mode_last_change = '000' # used for n-factor
logging.info('{} pumping level created.'.format(self.name))
if self.max_pumps != self.n_mode_max_pumps:
logging.warning('{} pumping level SCADA and third party max pumps differ ({} vs {})!.'.format(
self.name, self.max_pumps, self.n_mode_max_pumps))
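        # Worked example of the n-factor control bounds computed above, using
        # the default arguments (n_mode_min_level=33, n_mode_max_level=77,
        # n_mode_control_range=5): a pump is switched on once the level reaches
        # the upper bound (plus per-pump offsets) and switched off at the lower
        # bound, giving
        #   n_mode_lower_bound = {3: 33, 2: 33, 1: 72}
        #   n_mode_upper_bound = {3: 38, 2: 38, 1: 77}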
def get_level_history(self, index=None):
return self.level_history if index is None else self.level_history[index]
# @levelHistory.setter
def set_latest_level(self, value):
self.level_history.append(value)
def get_pump_status_history(self, index=None):
return self.pump_status_history if index is None else self.pump_status_history[index]
# @levelHistory.setter
def set_latest_pump_status(self, value):
self.pump_status_history.append(value)
def get_scada_pump_schedule_table_level(self, pump_index, tariff_index):
return self.pump_schedule_table[pump_index, tariff_index]
def get_last_outflow(self):
return 0 if self.fed_to_level is None else self.last_outflow
def set_last_outflow(self, value):
self.last_outflow = value
def get_upstream_level_name(self):
return self.fed_to_level
def get_fissure_water_inflow(self, current_hour=None, current_minute=None, pumps=None):
if isinstance(self.fissure_water_inflow, int) or isinstance(self.fissure_water_inflow, float): # it is constant
return self.fissure_water_inflow
else:
if self.fissure_water_inflow.shape[1] == 2: # if 2 columns. Not f(pump)
f1 = 0
f2 = 1
row = math.floor(current_hour)
else: # 3 columns. Is f(pump)
f1 = 1
f2 = 2
row = pumps * 24 - 1 + math.floor(current_hour)
if math.floor(current_minute) <= 30:
col = f1
else:
col = f2
return self.fissure_water_inflow[int(row), int(col)]
def set_UL_100(self, bool_):
self.UL_100 = bool_
def get_eskom_tou(current_hour):
ch = current_hour
if (7 <= ch < 10) or (18 <= ch < 20): # Eskom peak
tou_time_slot = 1
elif (0 <= ch < 6) or (22 <= ch < 24): # Eskom off-peak
tou_time_slot = 3
else: # Eskom standard
tou_time_slot = 2
return tou_time_slot
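# Quick illustration of the TOU mapping above (values follow directly from the
# hour ranges in get_eskom_tou):
#   get_eskom_tou(8)  -> 1  (Eskom peak)
#   get_eskom_tou(12) -> 2  (standard)
#   get_eskom_tou(23) -> 3  (off-peak)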
def get_current_day_hour_minute(seconds):
cd = math.floor(seconds / 86400) # cd = current day
ch = (seconds - cd * 86400) / (60 * 60) # ch = current hour
cm = (seconds - cd * 86400 - math.floor(ch) * 60 * 60) / 60 # cm = current minute
return cd, ch, cm
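# Example: 90000 seconds into the simulation corresponds to day 1, hour 1.0,
# minute 0.0, i.e. get_current_day_hour_minute(90000) returns (1, 1.0, 0.0)
# (the day is an int under Python 3's math.floor).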
class PumpSystem:
def __init__(self, name):
self.name = name
self.levels = []
self.eskom_tou = [3]
self.total_power = []
logging.info('{} pump system created.'.format(self.name))
def add_level(self, pumping_level):
self.levels.append(pumping_level)
logging.info('{} pumping level added to {} pump system.'.format(pumping_level.name, self.name))
def get_level_from_index(self, level_number):
return self.levels[level_number]
def get_level_from_name(self, level_name):
for l in self.levels:
if l.name == level_name:
return l
def __iter__(self):
return iter(self.levels)
def perform_simulation(self, mode, seconds=86400, save=False):
# 86400 = seconds in one day
logging.info('{} simulation started in {} mode.'.format(self.name, mode))
if mode not in ['1-factor', '2-factor', 'n-factor', 'validation']:
raise ValueError('Invalid simulation mode specified')
# reset simulation if it has run before
if len(self.total_power) > 1:
self.reset_pumpsystem_state()
for t in range(1, seconds): # start at 1, because initial conditions are specified
_, ch, cm = get_current_day_hour_minute(t)
tou_time_slot = get_eskom_tou(ch)
self.eskom_tou.append(tou_time_slot)
for level in self.levels:
# scheduling algorithm
if mode == '1-factor' or mode == '2-factor':
upstream_dam_name = level.get_upstream_level_name()
if mode == '1-factor' or upstream_dam_name is None:
upper_dam_level = 45
else:
upper_dam_level = self.get_level_from_name(upstream_dam_name).get_level_history(t - 1)
if upper_dam_level >= level.UL_HL:
level.set_UL_100(True)
if upper_dam_level <= level.UL_LL:
level.set_UL_100(False)
if not level.UL_100:
pumps_required = level.get_pump_status_history(t - 1)
pumps_required_temp = pumps_required
do_next_check = False
for p in range(1, level.max_pumps + 1):
dam_level = level.get_level_history(t - 1)
pump_level = level.get_scada_pump_schedule_table_level(p - 1, tou_time_slot - 1)
if dam_level >= pump_level:
pumps_required_temp = p
do_next_check = True
if dam_level < (
level.get_scada_pump_schedule_table_level(0,
tou_time_slot - 1) - level.hysteresis):
pumps_required = 0
do_next_check = False
if pumps_required >= (pumps_required_temp + 2):
pumps_required = pumps_required_temp + 1
if do_next_check:
if pumps_required_temp > pumps_required:
pumps_required = pumps_required_temp
else:
pumps_required = 0
elif mode == 'n-factor':
prev_level = level.get_level_history(t - 1)
prev_pumps = level.get_pump_status_history(t - 1)
pump_change = 0
if level.name == '31L':
if self.get_level_from_name('20L').get_level_history(t - 1) > 70:
level.n_mode_max_pumps = 1
if self.get_level_from_name('20L').get_level_history(t - 1) < 60:
level.n_mode_max_pumps = 2
if level.get_level_history(t - 1) >= (level.n_mode_max_level) and t < 42900:
level.n_mode_max_pumps = 2
if level.name == '20L':
if tou_time_slot == 1:
if level.get_level_history(t - 1) < 75:
level.n_mode_max_pumps = 1
if level.get_level_history(t - 1) < 60:
level.n_mode_max_pumps = 0
if level.get_level_history(t - 1) > 80:
level.n_mode_max_pumps = 1
else:
level.n_mode_max_pumps = 2
if level.name == 'IPC':
if tou_time_slot == 1:
level.n_mode_max_pumps = self.get_level_from_name('20L').n_mode_max_pumps
if level.get_level_history(t - 1) > 90:
level.n_mode_max_pumps = 1
else:
if self.get_level_from_name('Surface').get_level_history(t - 1) < 90 and t < 39600:
level.n_mode_max_pumps = 3
if level.get_level_history(t - 1) > 80 and t > 39600 and t < 64800:
level.n_mode_max_pumps = 3
if self.get_level_from_name('Surface').get_level_history(t - 1) < 90 and t > 57600:
level.n_mode_max_pumps = 3
if self.get_level_from_name('Surface').get_level_history(t - 1) >= 95 and t < 39600:
level.n_mode_max_pumps = 2
if self.get_level_from_name('Surface').get_level_history(
t - 1) >= 97.5 and level.get_level_history(t - 1) < 60:
level.n_mode_max_pumps = 1
if level.get_level_history(t - 1) < 50 and self.get_level_from_name(
'Surface').get_level_history(t - 1) >= 90 and t > 39600:
level.n_mode_max_pumps = 1
if t > 70200:
level.n_mode_max_pumps = 2
if t > 77400:
level.n_mode_max_pumps = 3
if t > 81000:
level.n_mode_max_pumps = 2
max_pumps = level.n_mode_max_pumps
for p in range(0, max_pumps):
# check if pumps should be switched on
check_lev = (level.n_mode_upper_bound[tou_time_slot] + p * level.n_mode_top_offset)
if prev_level >= check_lev:
this_change = check_lev
if this_change != level.n_mode_last_change:
pump_change = 1
level.n_mode_last_change = this_change
break
# check if pumps should be switched off
check_lev2 = (level.n_mode_lower_bound[tou_time_slot] - p * level.n_mode_bottom_offset)
if prev_level <= check_lev2:
this_change = check_lev2
if (level.n_mode_last_change == '000') or (this_change < level.n_mode_last_change) or (
tou_time_slot != self.eskom_tou[-2]):
pump_change = -1
level.n_mode_last_change = this_change
break
pumps_required = prev_pumps + pump_change
if pumps_required < level.n_mode_min_pumps:
pumps_required = level.n_mode_min_pumps
elif pumps_required > max_pumps:
pumps_required = max_pumps
else: # validation mode, so use actual statuses
pumps_required = level.pump_statuses_for_validation[t]
# calculate and update simulation values
pumps = pumps_required
outflow = pumps * level.pump_flow
level.set_last_outflow(outflow)
additional_in_flow = 0
for level2 in self.levels:
if level2.fed_to_level == level.name:
additional_in_flow += level2.get_last_outflow()
level_new = level.get_level_history(t - 1) + 100 / level.capacity * (
level.get_fissure_water_inflow(ch, cm, pumps) + additional_in_flow - outflow)
level.set_latest_level(level_new)
level.set_latest_pump_status(pumps)
# calculate pump system total power
        # (this could also be accumulated inside the simulation loop above)
power_list = []
for level in self.levels:
power_list.append(pd.DataFrame(level.get_pump_status_history()) * level.pump_power)
self.total_power = pd.concat(power_list, axis=1).sum(axis=1).values
logging.info('{} simulation completed in {} mode.'.format(self.name, mode))
if save:
self._save_simulation_results(mode, seconds)
def _save_simulation_results(self, mode, seconds):
df_list = []
index = range(0, seconds)
for level in self.levels:
data_level = level.get_level_history()
data_schedule = level.get_pump_status_history()
data = {level.name + " Level": data_level,
level.name + " Status": data_schedule}
df_list.append(pd.DataFrame(data=data, index=index))
df = pd.concat(df_list, axis=1)
data = {'Pump system total power': self.total_power,
'Eskom ToU': self.eskom_tou}
df = pd.concat([df, pd.DataFrame(data=data, index=index)], axis=1)
df.index.name = 'seconds'
os.makedirs(r'output/', exist_ok=True)
df.to_csv('output/{}_simulation_data_export_{}.csv.gz'.format(self.name, mode), compression='gzip')
logging.info('{} simulation data saved.'.format(mode))
def reset_pumpsystem_state(self):
self.eskom_tou = [3]
self.total_power = []
for level in self.levels:
level.level_history = [level.level_history[0]]
level.pump_status_history = [level.pump_status_history[0]]
level.last_outflow = 0
logging.info('{} pumping system successfully cleared.'.format(self.name))
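# Minimal usage sketch (illustrative only; 'level_20L' is a hypothetical, already
# configured PumpingLevel instance, so the lines are left commented out):
#   system = PumpSystem('Mine A')
#   system.add_level(level_20L)
#   system.perform_simulation(mode='n-factor', seconds=86400, save=True)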
|
gpl-3.0
|
DougBurke/astropy
|
astropy/tests/plugins/display.py
|
2
|
4418
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from ..helper import ignore_warnings
from ...utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
# This always returns with Astropy's version
from ... import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
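# Illustrative note (not part of this module): an affiliated package's conftest.py could
# register its own version and extra header entries, e.g.
#   from astropy.tests.plugins.display import TESTED_VERSIONS, PYTEST_HEADER_MODULES
#   TESTED_VERSIONS['mypackage'] = '1.0'        # hypothetical package name/version
#   PYTEST_HEADER_MODULES['Cython'] = 'cython'  # report another module in the header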
def pytest_report_header(config):
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg != 'Astropy':
s = "\nRunning tests with {0} version {1}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {0}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {0}.\n\n".format(" ".join(dirs))
s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += "Platform: {0}\n\n".format(plat)
s += "Executable: {0}\n\n".format(sys.executable)
s += "Full Python Version: \n{0}\n\n".format(sys.version)
s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += "byteorder: {0}\n".format(sys.byteorder)
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += "{0}: not available\n".format(module_display)
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += "{0}: {1}\n".format(module_display, version)
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {0}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
bsd-3-clause
|
shyamalschandra/scikit-learn
|
examples/cluster/plot_dict_face_patches.py
|
337
|
2747
|
"""
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
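        # Once 10 images (~500 patches, as described in the docstring above) have
        # accumulated, standardize the buffered patches and update the cluster
        # centers with partial_fit.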
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
             (dt, 6 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
|
bsd-3-clause
|
nvoron23/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, then because the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, which
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimate correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model, is
chosen by internal cross-validation in the GraphLassoCV. As can be
seen in figure 2, the grid on which the cross-validation score is computed is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
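# Rescale the covariance to a correlation matrix (unit diagonal); applying the same
# scaling to prec keeps it the exact inverse of the rescaled cov.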
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
TheNameIsNigel/opencog
|
opencog/python/spatiotemporal/temporal_events/animation.py
|
34
|
4896
|
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
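# 13 bars, one per pairwise temporal relation; the tick labels 'poDedOP' (major) and
# 'mFsSfM' (minor) set below spell out the 13 relation letters (presumably the
# Allen-style interval relations used by this module).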
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
|
agpl-3.0
|
wathen/PhD
|
MHD/FEniCS/FieldSplit/LSC/3D/NSpicard.py
|
1
|
11853
|
#!/opt/local/bin/python
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import os
import scipy.io
#from PyTrilinos import Epetra, EpetraExt, AztecOO, ML, Amesos
#from scipy2Trilinos import scipy_csr_matrix2CrsMatrix
import PETScIO as IO
import time
import common
import CheckPetsc4py as CP
import NSprecond
from scipy.sparse import spdiags
import MatrixOperations as MO
import ExactSol
parameters["form_compiler"]["optimize"] = True
parameters["form_compiler"]["cpp_optimize"] = True
#MO.SwapBackend('epetra')
#os.system("echo $PATH")
m = 5
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Vdim = np.zeros((m-1,1))
Qdim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
nonlinear = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
AvIt = np.zeros((m-1,1))
nn = 2
dim = 2
Solver = 'PCD'
Saving = 'no'
case = 1
# parameters['linear_algebra_backend'] = 'uBLAS'
# parameters = CP.ParameterSetup()
def LOG(arg):
if INFO:
print(arg)
for xx in xrange(1,m):
print xx
nn = 2**(xx)
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn
mesh = BoxMesh(-1, -1,-1, 1, 1, 1, nn, nn,nn)
# tic()
parameters['reorder_dofs_serial'] = False
V = VectorFunctionSpace(mesh, "CG", 2)
Q = FunctionSpace(mesh, "CG", 1)
# QQ = VectorFunctionSpace(mesh,"B",3)
# V = V+QQ
parameters['reorder_dofs_serial'] = False
# print 'time to create function spaces', toc(),'\n\n'
W = V*Q
Vdim[xx-1] = V.dim()
Qdim[xx-1] = Q.dim()
Wdim[xx-1] = W.dim()
print "\n\nV: ",Vdim[xx-1],"Q: ",Qdim[xx-1],"W: ",Wdim[xx-1],"\n\n"
def boundary(x, on_boundary):
return on_boundary
u0, p0, Laplacian, Advection, gradPres = ExactSol.NS3D(case)
R = 100
MU = Constant(1e0)
# MU = 2/R
bcc = DirichletBC(W.sub(0),u0, boundary)
bcs = [bcc]
(u, p) = TrialFunctions(W)
(v, q) = TestFunctions(W)
f = -MU*Laplacian+Advection+gradPres
n = FacetNormal(mesh)
h = CellSize(mesh)
h_avg =avg(h)
d = 0
u_k,p_k = common.Stokes(V,Q,u0,Laplacian+gradPres,[1,1,MU])
# p_k.vector()[:] = p_k.vector().array()
# u_k = Function(V)
# p_k = Function(Q)
uOld = np.concatenate((u_k.vector().array(),p_k.vector().array()), axis=0)
r = IO.arrayToVec(uOld)
a11 = MU*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds
a12 = div(v)*p*dx
a21 = div(u)*q*dx
L1 = inner(v, f)*dx
a = a11-a12-a21
r11 = MU*inner(grad(v), grad(u_k))*dx + inner((grad(u_k)*u_k),v)*dx + (1/2)*div(u_k)*inner(u_k,v)*dx- (1/2)*inner(u_k,n)*inner(u_k,v)*ds
r12 = div(v)*p_k*dx
r21 = div(u_k)*q*dx
RHSform = r11-r12-r21
p11 = MU*inner(grad(v), grad(u))*dx + inner((grad(u)*u_k),v)*dx + (1/2)*div(u_k)*inner(u,v)*dx- (1/2)*inner(u_k,n)*inner(u,v)*ds
p12 = div(v)*p*dx
prec = p11 -p12
bc = DirichletBC(W.sub(0),Expression(("0","0","0")), boundary)
bcs = [bc]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-5 # tolerance
iter = 0 # iteration counter
maxiter = 10 # max no of iterations allowed
# parameters = CP.ParameterSetup()
outerit = 0
if Solver == "LSC":
parameters['linear_algebra_backend'] = 'uBLAS'
BQB = assemble(inner(u,v)*dx- div(v)*p*dx-div(u)*q*dx)
bc.apply(BQB)
BQB = BQB.sparray()
X = BQB[0:V.dim(),0:V.dim()]
Xdiag = X.diagonal()
# Xdiag = X.sum(1).A
# print Xdiag
B = BQB[V.dim():W.dim(),0:V.dim()]
Bt = BQB[0:V.dim(),V.dim():W.dim()]
d = spdiags(1.0/Xdiag, 0, len(Xdiag), len(Xdiag))
L = B*d*Bt
Bd = B*d
dBt = d*Bt
L = PETSc.Mat().createAIJ(size=L.shape,csr=(L.indptr, L.indices, L.data))
Bd = PETSc.Mat().createAIJ(size=Bd.shape,csr=(Bd.indptr, Bd.indices, Bd.data))
dBt = PETSc.Mat().createAIJ(size=dBt.shape,csr=(dBt.indptr, dBt.indices, dBt.data))
parameters['linear_algebra_backend'] = 'PETSc'
elif Solver == "PCD":
(pQ) = TrialFunction(Q)
(qQ) = TestFunction(Q)
Mass = assemble(inner(pQ,qQ)*dx)
L = assemble(inner(grad(pQ),grad(qQ))*dx)
fp = MU*inner(grad(qQ), grad(pQ))*dx+inner((u_k[0]*grad(pQ)[0]+u_k[1]*grad(pQ)[1]+u_k[2]*grad(pQ)[2]),qQ)*dx + (1/2)*div(u_k)*inner(pQ,qQ)*dx - (1/2)*(u_k[0]*n[0]+u_k[1]*n[1]+u_k[2]*n[2])*inner(pQ,qQ)*ds
# print "hi"
L = CP.Assemble(L)
Mass = CP.Assemble(Mass)
# print L
SolutionTime = 0
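    # Picard (fixed-point) iteration: re-assemble the system linearized about the current
    # iterate (u_k, p_k), solve it with preconditioned GMRES, and stop once the velocity
    # update drops below tol or maxiter is reached.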
while eps > tol and iter < maxiter:
iter += 1
x = Function(W)
uu = Function(W)
tic()
AA, bb = assemble_system(a, L1-RHSform, bcs)
A,b = CP.Assemble(AA,bb)
print toc()
print A
# b = b.getSubVector(t_is)
PP = assemble(prec)
bcc.apply(PP)
P = CP.Assemble(PP)
b = bb.array()
zeros = 0*b
bb = IO.arrayToVec(b)
x = IO.arrayToVec(zeros)
ksp = PETSc.KSP()
ksp.create(comm=PETSc.COMM_WORLD)
ksp.setTolerances(1e-5)
        ksp.setType('gmres')
pc = ksp.getPC()
        ksp.setOperators(A,P)
        pc.setType(PETSc.PC.Type.PYTHON)
        if Solver == "LSC":
            pc.setPythonContext(NSprecond.LSCnew(W,A,L,Bd,dBt))
        elif Solver == "PCD":
            F = assemble(fp)
            F = CP.Assemble(F)
            pc.setPythonContext(NSprecond.PCD(W, Mass, F, L))
        del A, P  # free the assembled operators only after the preconditioner is set up
# OptDB = PETSc.Options()
# OptDB['pc_factor_shift_amount'] = 1
# OptDB['pc_factor_mat_ordering_type'] = 'rcm'
# OptDB['pc_factor_mat_solver_package'] = 'mumps'
ksp.setFromOptions()
toc()
ksp.solve(bb, x)
time = toc()
print time
SolutionTime = SolutionTime +time
print ksp.its
outerit += ksp.its
# r = bb.duplicate()
# A.MUlt(x, r)
# r.aypx(-1, bb)
# rnorm = r.norm()
# PETSc.Sys.Print('error norm = %g' % rnorm,comm=PETSc.COMM_WORLD)
uu = IO.vecToArray(x)
UU = uu[0:Vdim[xx-1][0]]
# time = time+toc()
u1 = Function(V)
u1.vector()[:] = u1.vector()[:] + UU
pp = uu[Vdim[xx-1][0]:]
# time = time+toc()
p1 = Function(Q)
n = pp.shape
p1.vector()[:] = p1.vector()[:] + pp
diff = u1.vector().array()
eps = np.linalg.norm(diff, ord=np.Inf)
print '\n\n\niter=%d: norm=%g' % (iter, eps)
print np.linalg.norm(p1.vector().array(),ord=np.inf)
u2 = Function(V)
u2.vector()[:] = u1.vector().array() + u_k.vector().array()
p2 = Function(Q)
p2.vector()[:] = p1.vector().array() + p_k.vector().array()
u_k.assign(u2)
p_k.assign(p2)
uOld = np.concatenate((u_k.vector().array(),p_k.vector().array()), axis=0)
r = IO.arrayToVec(uOld)
SolTime[xx-1] = SolutionTime/iter
if case == 1:
ue = u0
pe = p0
elif case == 2:
ue = u0
pe = p0
AvIt[xx-1] = np.ceil(outerit/iter)
u = interpolate(ue,V)
p = interpolate(pe,Q)
ua = Function(V)
ua.vector()[:] = u_k.vector().array()
# nonlinear[xx-1] = assemble(inner((grad(ua)*ua),ua)*dx+(1/2)*div(ua)*inner(ua,ua)*dx- (1/2)*inner(ua,n)*inner(ua,ua)*ds)
VelocityE = VectorFunctionSpace(mesh,"CG",4)
u = interpolate(ue,VelocityE)
PressureE = FunctionSpace(mesh,"CG",3)
Nv = ua.vector().array().shape
X = IO.vecToArray(r)
xu = X[0:V.dim()]
ua = Function(V)
ua.vector()[:] = xu
pp = X[V.dim():V.dim()+Q.dim()]
n = pp.shape
pa = Function(Q)
pa.vector()[:] = pp
pend = assemble(pa*dx)
ones = Function(Q)
ones.vector()[:]=(0*pp+1)
pp = Function(Q)
pp.vector()[:] = pa.vector().array()- assemble(pa*dx)/assemble(ones*dx)
pInterp = interpolate(pe,PressureE)
pe = Function(PressureE)
pe.vector()[:] = pInterp.vector().array()
const = - assemble(pe*dx)/assemble(ones*dx)
pe.vector()[:] = pe.vector()[:]+const
ErrorU = Function(V)
ErrorP = Function(Q)
ErrorU = ue-ua
ErrorP = pe-pp
errL2u[xx-1]= sqrt(abs(assemble(inner(ErrorU, ErrorU)*dx)))
errH1u[xx-1]= errornorm(ue, ua, norm_type='H10', degree_rise=1)
errL2p[xx-1]= sqrt(abs(assemble(inner(ErrorP, ErrorP)*dx)))
if xx == 1:
l2uorder[xx-1] = 0
l2porder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
print errL2u[xx-1]
print errL2p[xx-1]
# del solver
print nonlinear
print "Velocity Elements rate of convergence ", np.log2(np.average((errL2u[0:m-2]/errL2u[1:m-1])))
print "Pressure Elements rate of convergence ", np.log2(np.average((errL2p[0:m-2]/errL2p[1:m-1])))
import pandas as pd
# tableTitles = ["Total DoF","V DoF","Q DoF","AvIt","V-L2","V-order","P-L2","P-order"]
# tableValues = np.concatenate((Wdim,Vdim,Qdim,AvIt,errL2u,l2uorder,errL2p,l2porder),axis=1)
# df = pd.DataFrame(tableValues, columns = tableTitles)
# pd.set_option('precision',3)
# print df
# print df.to_latex()
# print "\n\n Velocity convergence"
# VelocityTitles = ["Total DoF","V DoF","Soln Time","AvIt","V-L2","L2-order","V-H1","H1-order"]
# VelocityValues = np.concatenate((Wdim,Vdim,SolTime,AvIt,errL2u,l2uorder,errH1u,H1uorder),axis=1)
# VelocityTable= pd.DataFrame(VelocityValues, columns = VelocityTitles)
# pd.set_option('precision',3)
# VelocityTable = MO.PandasFormat(VelocityTable,"V-L2","%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,'V-H1',"%2.4e")
# VelocityTable = MO.PandasFormat(VelocityTable,"H1-order","%1.2f")
# VelocityTable = MO.PandasFormat(VelocityTable,'L2-order',"%1.2f")
# print VelocityTable
print "\n\n Pressure convergence"
PressureTitles = ["Total DoF","P DoF","Soln Time","AvIt","P-L2","L2-order"]
PressureValues = np.concatenate((Wdim,Qdim,SolTime,AvIt,errL2p,l2porder),axis=1)
PressureTable= pd.DataFrame(PressureValues, columns = PressureTitles)
pd.set_option('precision',3)
PressureTable = MO.PandasFormat(PressureTable,"P-L2","%2.4e")
PressureTable = MO.PandasFormat(PressureTable,'L2-order',"%1.2f")
print PressureTable
LatexTitles = ["DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((Vdim,Qdim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
# plt.loglog(N,erru)
# plt.title('Error plot for P2 elements - convergence = %f' % np.log2(np.average((erru[0:m-2]/erru[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plt.figure()
# plt.loglog(N,errp)
# plt.title('Error plot for P1 elements - convergence = %f' % np.log2(np.average((errp[0:m-2]/errp[1:m-1]))))
# plt.xlabel('N')
# plt.ylabel('L2 error')
# plot(ua)
# plot(interpolate(ue,V))
# plot(pp)
# plot(interpolate(pe,Q))
# interactive()
# plt.show()
|
mit
|
gfyoung/pandas
|
pandas/tests/indexes/datetimes/methods/test_shift.py
|
2
|
5459
|
from datetime import datetime
import pytest
import pytz
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import DatetimeIndex, Series, date_range
import pandas._testing as tm
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexShift:
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
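    # e.g. rng + 5 * rng.freq is equivalent to rng.shift(5), as exercised in
    # test_dti_shift_int below (illustrative note only).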
def test_dti_shift_tzaware(self, tz_naive_fixture):
# GH#9903
tz = tz_naive_fixture
idx = DatetimeIndex([], name="xxx", tz=tz)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
idx = DatetimeIndex(
["2011-01-01 10:00", "2011-01-01 11:00", "2011-01-01 12:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = DatetimeIndex(
["2011-01-01 13:00", "2011-01-01 14:00", "2011-01-01 15:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = DatetimeIndex(
["2011-01-01 07:00", "2011-01-01 08:00", "2011-01-01 09:00"],
name="xxx",
tz=tz,
freq="H",
)
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_dti_shift_freqs(self):
# test shift for DatetimeIndex and non DatetimeIndex
# GH#8083
drange = date_range("20130101", periods=5)
result = drange.shift(1)
expected = DatetimeIndex(
["2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05", "2013-01-06"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(
["2012-12-31", "2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04"],
freq="D",
)
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq="2D")
expected = DatetimeIndex(
["2013-01-07", "2013-01-08", "2013-01-09", "2013-01-10", "2013-01-11"],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_dti_shift_int(self):
rng = date_range("1/1/2000", periods=20)
result = rng + 5 * rng.freq
expected = rng.shift(5)
tm.assert_index_equal(result, expected)
result = rng - 5 * rng.freq
expected = rng.shift(-5)
tm.assert_index_equal(result, expected)
def test_dti_shift_no_freq(self):
# GH#19147
dti = DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
dti.shift(2)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_shift_localized(self, tzstr):
dr = date_range("2011/1/1", "2012/1/1", freq="W-FRI")
dr_tz = dr.tz_localize(tzstr)
result = dr_tz.shift(1, "10T")
assert result.tz == dr_tz.tz
def test_dti_shift_across_dst(self):
# GH 8616
idx = date_range("2013-11-03", tz="America/Chicago", periods=7, freq="H")
s = Series(index=idx[:-1], dtype=object)
result = s.shift(freq="H")
expected = Series(index=idx[1:], dtype=object)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"shift, result_time",
[
[0, "2014-11-14 00:00:00"],
[-1, "2014-11-13 23:00:00"],
[1, "2014-11-14 01:00:00"],
],
)
def test_dti_shift_near_midnight(self, shift, result_time):
# GH 8616
dt = datetime(2014, 11, 14, 0)
dt_est = pytz.timezone("EST").localize(dt)
s = Series(data=[1], index=[dt_est])
result = s.shift(shift, freq="H")
expected = Series(1, index=DatetimeIndex([result_time], tz="EST"))
tm.assert_series_equal(result, expected)
def test_shift_periods(self):
# GH#22458 : argument 'n' was deprecated in favor of 'periods'
idx = date_range(start=START, end=END, periods=3)
tm.assert_index_equal(idx.shift(periods=0), idx)
tm.assert_index_equal(idx.shift(0), idx)
@pytest.mark.parametrize("freq", ["B", "C"])
def test_shift_bday(self, freq):
rng = date_range(START, END, freq=freq)
shifted = rng.shift(5)
assert shifted[0] == rng[5]
assert shifted.freq == rng.freq
shifted = rng.shift(-5)
assert shifted[5] == rng[0]
assert shifted.freq == rng.freq
shifted = rng.shift(0)
assert shifted[0] == rng[0]
assert shifted.freq == rng.freq
def test_shift_bmonth(self):
rng = date_range(START, END, freq=pd.offsets.BMonthEnd())
shifted = rng.shift(1, freq=pd.offsets.BDay())
assert shifted[0] == rng[0] + pd.offsets.BDay()
rng = date_range(START, END, freq=pd.offsets.BMonthEnd())
with tm.assert_produces_warning(pd.errors.PerformanceWarning):
shifted = rng.shift(1, freq=pd.offsets.CDay())
assert shifted[0] == rng[0] + pd.offsets.CDay()
def test_shift_empty(self):
# GH#14811
dti = date_range(start="2016-10-21", end="2016-10-21", freq="BM")
result = dti.shift(1)
tm.assert_index_equal(result, dti)
|
bsd-3-clause
|
hobson/pug-nlp
|
pug/nlp/constant.py
|
1
|
7982
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants and discovered values, like path to current installation of pug-nlp."""
from __future__ import division, print_function, absolute_import
from builtins import (
int, list, range, str,
chr,
zip)
# from builtins import (
# bytes, dict, int, list, object, range, str,
# ascii, chr, hex, input, next, oct, open,
# pow, round, super,
# filter, map, zip)
# import re
import os
import string
import datetime
from pytz import timezone
from collections import Mapping, OrderedDict
import pandas as pd
from decimal import Decimal
# TZ constants
try:
from django.conf import settings
TIME_ZONE = timezone(settings.TIME_ZONE)
except:
TIME_ZONE = timezone('UTC')
DEFAULT_TZ = timezone('UTC')
ROUNDABLE_NUMERIC_TYPES = (float, long, int, Decimal, bool)
FLOATABLE_NUMERIC_TYPES = (float, long, int, Decimal, bool)
BASIC_NUMERIC_TYPES = (float, long, int)
NUMERIC_TYPES = (float, long, int, Decimal, complex, str) # datetime.datetime, datetime.date
NUMBERS_AND_DATETIMES = (float, long, int, Decimal, complex, str)
SCALAR_TYPES = (float, long, int, Decimal, bool, complex, basestring, str, unicode) # datetime.datetime, datetime.date
# numpy types are derived from these so no need to include numpy.float64, numpy.int64 etc
DICTABLE_TYPES = (Mapping, tuple, list) # convertable to a dictionary (inherits Mapping or is a list of key/value pairs)
VECTOR_TYPES = (list, tuple)
PUNC = unicode(string.punctuation)
# synonyms for "count"
COUNT_NAMES = ['count', 'cnt', 'number', 'num', '#', 'frequency', 'probability', 'prob', 'occurences']
# 4 types of "histograms" and their canonical name/label
HIST_NAME = {
'hist': 'hist', 'ff': 'hist', 'fd': 'hist', 'dff': 'hist', 'dfd': 'hist', 'gfd': 'hist', 'gff': 'hist', 'bfd': 'hist', 'bff': 'hist',
'pmf': 'pmf', 'pdf': 'pmf', 'pd': 'pmf',
'cmf': 'cmf', 'cdf': 'cmf',
'cfd': 'cfd', 'cff': 'cfd', 'cdf': 'cfd',
}
HIST_CONFIG = {
'hist': {
'name': 'Histogram', # frequency distribution, frequency function, discrete ff/fd, grouped ff/fd, binned ff/fd
'kwargs': {'normalize': False, 'cumulative': False, },
'index': 0,
'xlabel': 'Bin',
'ylabel': 'Count',
},
'pmf': {
# PMFs have discrete, exact values as bins rather than ranges (finite bin widths)
# but this histogram configuration doesn't distinguish between PMFs and PDFs,
# since mathematically they have all the same properties.
# PDFs just have a range associated with each discrete value
# (which should be when integrating a PDF but not when summing a PMF where the "width" is uniformly 1)
'name': 'Probability Mass Function', # probability density function, probability distribution [function]
'kwargs': {'normalize': True, 'cumulative': False, },
'index': 1,
'xlabel': 'Bin',
'ylabel': 'Probability',
},
'cmf': {
'name': 'Cumulative Probability',
'kwargs': {'normalize': True, 'cumulative': True, },
'index': 2,
'xlabel': 'Bin',
'ylabel': 'Cumulative Probability',
},
'cfd': {
'name': 'Cumulative Frequency Distribution',
'kwargs': {'normalize': False, 'cumulative': True, },
'index': 3,
'xlabel': 'Bin',
'ylabel': 'Cumulative Count',
},
}
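# Illustrative lookup (assumed usage, not defined here): canonicalize a histogram type
# with HIST_NAME, then fetch its plotting configuration, e.g.
#   HIST_CONFIG[HIST_NAME['pdf']]['ylabel']   # -> 'Probability'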
np = pd.np
BASE_PATH = os.path.dirname(__file__)
DATA_PATH = os.path.join(BASE_PATH, 'data')
tld_iana = pd.read_csv(os.path.join(DATA_PATH, 'tlds-from-iana.csv'))
tld_iana = OrderedDict(sorted(zip((tld.strip().lstrip('.') for tld in tld_iana.domain),
[(sponsor.strip(), -1) for sponsor in tld_iana.sponsor]),
key=lambda x: len(x[0]),
reverse=True))
# top 20 in Google searches per day
# sorted by longest first so .com matches before .om (Oman)
tld_popular = OrderedDict(sorted([
('com', ('Commercial', 4860000000)),
('org', ('Noncommercial', 1950000000)),
('edu', ('US accredited postsecondary institutions', 1550000000)),
('gov', ('United States Government', 1060000000)),
('uk', ('United Kingdom', 473000000)),
('net', ('Network services', 206000000)),
('ca', ('Canada', 165000000)),
('de', ('Germany', 145000000)),
('jp', ('Japan', 139000000)),
('fr', ('France', 96700000)),
('au', ('Australia', 91000000)),
('us', ('United States', 68300000)),
('ru', ('Russian Federation', 67900000)),
('ch', ('Switzerland', 62100000)),
('it', ('Italy', 55200000)),
('nl', ('Netherlands', 45700000)),
('se', ('Sweden', 39000000)),
('no', ('Norway', 32300000)),
('es', ('Spain', 31000000)),
('mil', ('US Military', 28400000)),
], key=lambda x: len(x[0]), reverse=True))
uri_schemes_iana = sorted(pd.read_csv(os.path.join(DATA_PATH, 'uri-schemes.xhtml.csv'),
index_col=0).index.values,
key=lambda x: len(str(x)), reverse=True)
uri_schemes_popular = ['chrome-extension', 'example', 'content', 'bitcoin',
'telnet', 'mailto',
'https', 'gtalk',
'http', 'smtp', 'feed',
'udp', 'ftp', 'ssh', 'git', 'apt', 'svn', 'cvs']
# these may not all be the sames isinstance types, depending on the env
FLOAT_TYPES = (float, np.float16, np.float32, np.float64, np.float128)
FLOAT_DTYPES = tuple(set(np.dtype(typ) for typ in FLOAT_TYPES))
INT_TYPES = (int, long, np.int0, np.int8, np.int16, np.int32, np.int64)
INT_DTYPES = tuple(set(np.dtype(typ) for typ in INT_TYPES))
NUMERIC_TYPES = tuple(set(list(FLOAT_TYPES) + list(INT_TYPES)))
NUMERIC_DTYPES = tuple(set(np.dtype(typ) for typ in NUMERIC_TYPES))
DATETIME_TYPES = (datetime.datetime, pd.datetime, np.datetime64)
DATE_TYPES = (datetime.datetime, datetime.date)
# matrices can be column or row vectors if they have a single col/row
VECTOR_TYPES = (list, tuple, np.matrix, np.ndarray)
MAPPING_TYPES = (Mapping, pd.Series, pd.DataFrame)
# These are the valid dates for all 3 datetime types in python (and the underelying integer nanoseconds)
MAX_INT64 = 9223372036854775807
MAX_UINT64 = MAX_INT64 * 2 - 1
MAX_UINT32 = 4294967295
MAX_INT32 = MAX_UINT32 // 2
MAX_UINT16 = 65535
MAX_INT16 = 32767
MAX_TIMESTAMP = pd.tslib.Timestamp('2262-04-11 23:47:16.854775807', tz='utc')
MIN_TIMESTAMP = pd.tslib.Timestamp(pd.datetime(1677, 9, 22, 0, 12, 44), tz='utc')
ZERO_TIMESTAMP = pd.tslib.Timestamp('1970-01-01 00:00:00', tz='utc')
MIN_DATETIME = MIN_TIMESTAMP.to_datetime()
MAX_DATETIME = MAX_TIMESTAMP.to_datetime()
MIN_DATETIME64 = MIN_TIMESTAMP.to_datetime64()
MAX_DATETIME64 = MAX_TIMESTAMP.to_datetime64()
INF = pd.np.inf
NAN = pd.np.nan
NAT = pd.NaT
# str constants
MAX_CHR = MAX_CHAR = chr(127)
APOSTROPHE_CHARS = "'`’"
UNPRINTABLE = ''.join(set(chr(i) for i in range(128)) - set(string.printable))
string.unprintable = UNPRINTABLE # monkey patch so import string from this module if you want this!
NULL_VALUES = set(['0', 'None', 'null', "'"] + ['0.' + z for z in ['0' * i for i in range(10)]])
# if datetime's are 'repr'ed before being checked for null values sometime 1899-12-30 will come up
NULL_REPR_VALUES = set(['datetime.datetime(1899, 12, 30)'])
# to allow NULL checks to strip off hour/min/sec from string repr when checking for equality
MAX_NULL_REPR_LEN = max(len(s) for s in NULL_REPR_VALUES)
PERCENT_SYMBOLS = ('percent', 'pct', 'pcnt', 'pt', r'%')
FINANCIAL_WHITESPACE = ('Flat', 'flat', ' ', ',', '"', "'", '\t', '\n', '\r', '$')
FINANCIAL_MAPPING = (('k', '000'), ('M', '000000'))
# MONTHS = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']
# MONTH_PREFIXES = [m[:3] for m in MONTHS]
# MONTH_SUFFIXES = [m[3:] for m in MONTHS]
# SUFFIX_LETTERS = ''.join(set(''.join(MONTH_SUFFIXES)))
|
mit
|
navrasio/mxnet
|
example/multivariate_time_series/src/lstnet.py
|
17
|
11583
|
# !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
#Todo: Ensure skip connection implementation is correct
import os
import math
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import logging
import metrics
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--q', type=int, default=24*7,
help='number of historical measurements included in each training example')
parser.add_argument('--horizon', type=int, default=3,
help='number of measurements ahead to predict')
parser.add_argument('--splits', type=str, default="0.6,0.2",
help='fraction of data to use for train & validation. remainder used for test.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--filter-list', type=str, default="6,12,18",
help='unique filter sizes')
parser.add_argument('--num-filters', type=int, default=100,
help='number of each filter size')
parser.add_argument('--recurrent-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--seasonal-period', type=int, default=24,
help='time between seasonal measurements')
parser.add_argument('--time-interval', type=int, default=1,
help='time between each measurement')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
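# Example invocation (illustrative; the data path and hyper-parameter values are
# assumptions, not a prescribed configuration):
#   python lstnet.py --data-dir ../data --q 168 --horizon 3 --gpus 0 --num-epochs 100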
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
"""
# Read in data as numpy array
df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
feature_df = df.iloc[:, :].astype(float)
x = feature_df.as_matrix()
x = x[:max_records] if max_records else x
# Construct training examples based on horizon and window
x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
for n in range(x.shape[0]):
if n + 1 < q:
continue
elif n + 1 + horizon > x.shape[0]:
continue
else:
y_n = x[n + horizon, :]
x_n = x[n + 1 - q:n + 1, :]
x_ts[n-q] = x_n
y_ts[n-q] = y_n
# Split into training and testing data
training_examples = int(x_ts.shape[0] * splits[0])
valid_examples = int(x_ts.shape[0] * splits[1])
x_train, y_train = x_ts[:training_examples], \
y_ts[:training_examples]
x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \
y_ts[training_examples:training_examples + valid_examples]
x_test, y_test = x_ts[training_examples + valid_examples:], \
y_ts[training_examples + valid_examples:]
#build iterators to feed batches to network
train_iter = mx.io.NDArrayIter(data=x_train,
label=y_train,
batch_size=batch_size)
val_iter = mx.io.NDArrayIter(data=x_valid,
label=y_valid,
batch_size=batch_size)
test_iter = mx.io.NDArrayIter(data=x_test,
label=y_test,
batch_size=batch_size)
return train_iter, val_iter, test_iter
def sym_gen(train_iter, q, filter_list, num_filter, dropout, rcells, skiprcells, seasonal_period, time_interval):
input_feature_shape = train_iter.provide_data[0][1]
X = mx.symbol.Variable(train_iter.provide_data[0].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
# reshape data before applying convolutional layer (takes 4D shape incase you ever work with images)
conv_input = mx.sym.reshape(data=X, shape=(0, 1, q, -1))
###############
# CNN Component
###############
outputs = []
for i, filter_size in enumerate(filter_list):
# pad input array to ensure number output rows = number input rows after applying kernel
padi = mx.sym.pad(data=conv_input, mode="constant", constant_value=0,
pad_width=(0, 0, 0, 0, filter_size - 1, 0, 0, 0))
convi = mx.sym.Convolution(data=padi, kernel=(filter_size, input_feature_shape[2]), num_filter=num_filter)
acti = mx.sym.Activation(data=convi, act_type='relu')
trans = mx.sym.reshape(mx.sym.transpose(data=acti, axes=(0, 2, 1, 3)), shape=(0, 0, 0))
outputs.append(trans)
cnn_features = mx.sym.Concat(*outputs, dim=2)
cnn_reg_features = mx.sym.Dropout(cnn_features, p=dropout)
###############
# RNN Component
###############
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(rcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
rnn_features = outputs[-1] #only take value from final unrolled cell for use later
####################
# Skip-RNN Component
####################
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(skiprcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
# Take output from cells p steps apart
p = int(seasonal_period / time_interval)
output_indices = list(range(0, q, p))
outputs.reverse()
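    # After reversing, index i corresponds to unrolled time step q-1-i, so taking
    # indices 0, p, 2p, ... selects outputs spaced one seasonal period apart,
    # counting back from the final time step.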
skip_outputs = [outputs[i] for i in output_indices]
skip_rnn_features = mx.sym.concat(*skip_outputs, dim=1)
##########################
# Autoregressive Component
##########################
auto_list = []
for i in list(range(input_feature_shape[2])):
time_series = mx.sym.slice_axis(data=X, axis=2, begin=i, end=i+1)
fc_ts = mx.sym.FullyConnected(data=time_series, num_hidden=1)
auto_list.append(fc_ts)
ar_output = mx.sym.concat(*auto_list, dim=1)
######################
# Prediction Component
######################
neural_components = mx.sym.concat(*[rnn_features, skip_rnn_features], dim=1)
neural_output = mx.sym.FullyConnected(data=neural_components, num_hidden=input_feature_shape[2])
model_output = neural_output + ar_output
loss_grad = mx.sym.LinearRegressionOutput(data=model_output, label=Y)
return loss_grad, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(symbol, train_iter, valid_iter, data_names, label_names):
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol, data_names=data_names, label_names=label_names, context=devs)
module.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
module.init_params(mx.initializer.Uniform(0.1))
module.init_optimizer(optimizer=args.optimizer, optimizer_params={'learning_rate': args.lr})
for epoch in range(1, args.num_epochs+1):
train_iter.reset()
val_iter.reset()
for batch in train_iter:
module.forward(batch, is_train=True) # compute predictions
module.backward() # compute gradients
module.update() # update parameters
train_pred = module.predict(train_iter).asnumpy()
train_label = train_iter.label[0][1].asnumpy()
print('\nMetrics: Epoch %d, Training %s' % (epoch, metrics.evaluate(train_pred, train_label)))
val_pred = module.predict(val_iter).asnumpy()
val_label = val_iter.label[0][1].asnumpy()
print('Metrics: Epoch %d, Validation %s' % (epoch, metrics.evaluate(val_pred, val_label)))
if epoch % args.save_period == 0 and epoch > 1:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if epoch == args.num_epochs:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.splits = list(map(float, args.splits.split(',')))
args.filter_list = list(map(int, args.filter_list.split(',')))
# Check valid args
if not max(args.filter_list) <= args.q:
raise AssertionError("no filter can be larger than q")
if not args.q >= math.ceil(args.seasonal_period / args.time_interval):
raise AssertionError("size of skip connections cannot exceed q")
# Build data iterators
train_iter, val_iter, test_iter = build_iters(args.data_dir, args.max_records, args.q, args.horizon, args.splits, args.batch_size)
# Choose cells for recurrent layers: each cell will take the output of the previous cell in the list
rcells = [mx.rnn.GRUCell(num_hidden=args.recurrent_state_size)]
skiprcells = [mx.rnn.LSTMCell(num_hidden=args.recurrent_state_size)]
# Define network symbol
symbol, data_names, label_names = sym_gen(train_iter, args.q, args.filter_list, args.num_filters,
args.dropout, rcells, skiprcells, args.seasonal_period, args.time_interval)
# train cnn model
train(symbol, train_iter, val_iter, data_names, label_names)
|
apache-2.0
|
barnabytprowe/great3-public
|
metrics/calculate_QG10_var_shear_normalization.py
|
2
|
5017
|
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""@file calculate_QG10_var_shear_normalization.py
Script for calculating G10-style metric results for a range of biases (normalization comes from the
ensemble mean value at fiducial bias).
"""
import sys
import os
import cPickle
import matplotlib.pyplot as plt
import numpy as np
import galsim
import g3metrics
NIMS = int(200)
NGRID = 100 # Each image contains a grid of NGRID x NGRID galaxies
DX_GRID = 0.1 # Grid spacing (must be in degrees)
NOISE_SIGMA = 0.05 # Expected noise on each shear after shape noise pushed largely into B-mode
NBINS=8 # Number of bins of PS for metric
Q10_SCALING = 1
CTEST = [1.e-4, 3.e-4, 1.e-3, 3.e-3, 1.e-2]
MTEST = [1.e-3, 3.e-3, 1.e-2, 3.e-2, 1.e-1]
NREPEAT = 1000
#GALSIM_DIR=os.path.join("/Path", "To", "Your", "Repo")
GALSIM_DIR=os.path.join("/Users", "browe", "great3", "galsim")
OUTFILE = os.path.join(
'results', 'normalizationv3_G10_QuadPS_N'+str(NREPEAT)+'_noise_sigma'+str(NOISE_SIGMA)+'.pkl')
if __name__ == "__main__":
reference_ps = g3metrics.read_ps(galsim_dir=GALSIM_DIR)
# Make the truth catalogues (a list of 2D, NGRIDxNGRID numpy arrays), reusing the reference_ps
# each time for simplicity
g1true_list, g2true_list = g3metrics.make_var_truth_catalogs(
1, NIMS, [reference_ps,], ngrid=NGRID, grid_units=galsim.degrees)
# Define some empty storage arrays
qG10unnorm = np.empty((len(CTEST), len(MTEST), NREPEAT))
qQuadPSunnorm = np.empty((len(CTEST), len(MTEST), NREPEAT))
# TEMP: Make all the PS realizations (of the truth) the same to see if this alters noise props
g1true_list = [g1true_list[0],] * len(g1true_list)
g2true_list = [g2true_list[0],] * len(g2true_list)
# Then generate submissions, and truth submissions
for c, i in zip(CTEST, range(len(CTEST))):
print "Calculating PS metrics with c_i = "+str(c)
for m, j in zip(MTEST, range(len(MTEST))):
print "Calculating PS metrics with m_i = "+str(m)
for krepeat in range(NREPEAT):
# Make a fake submission
ksub, pEsubs, pBsubs, pEtrues, pBtrues = g3metrics.make_submission_var_shear(
c1=c, c2=c, m1=m, m2=m, g1true_list=g1true_list, g2true_list=g2true_list,
noise_sigma=NOISE_SIGMA, dx_grid=DX_GRID, nbins=NBINS)
# Calculate the G10 metric for this realization
qG10, mean_pEestG10, mean_pEtrueG10, mean_diffG10 = g3metrics.metricG10_var_shear(
ksub, pEsubs, [NOISE_SIGMA**2] * NIMS, pEtrues, scaling=Q10_SCALING,
dx_grid=DX_GRID)
# Calculate the QuadPS metric for this realization
qQuadPS, mean_pEestQuadPS, mean_pEtrueQuadPS, mean_diffQuadPS = \
g3metrics.metricQuadPS_var_shear(
ksub, pEsubs, [NOISE_SIGMA**2] * NIMS, pEtrues, scaling=Q10_SCALING,
dx_grid=DX_GRID)
# Save the results
qG10unnorm[i, j, krepeat] = qG10
qQuadPSunnorm[i, j, krepeat] = qQuadPS
# Print a progress marker to stdout so the user knows progress is being made
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\n')
print ""
print "Writing results to "+OUTFILE
cPickle.dump((qG10unnorm, qQuadPSunnorm), open(OUTFILE, 'wb'))
print ""
| bsd-3-clause |
| jaredwo/obsio | obsio/providers/uscrn.py | 1 | 3795 |
from .generic import ObsIO
from ..util.misc import open_remote_file
import pandas as pd
import numpy as np
_COLSPECS = [(20,20+13),(89,89+7)]
_COLNAMES = ['time_local','prcp']
def _parse_uscrn_obs(fpath, stnid):
obs = pd.read_fwf(fpath, colspecs=_COLSPECS, header=None,
names=_COLNAMES, na_values=[-9999.0, -99.000])
obs['time_local'] = pd.to_datetime(obs.time_local)
obs = obs.rename(columns={'time_local':'time', 'prcp':'obs_value'})
obs['elem'] = 'prcp'
obs['station_id'] = stnid
return obs.dropna()
class UscrnObsIO(ObsIO):
_avail_elems = ['prcp']
_requires_local = False
name = 'USCRN'
def __init__(self, **kwargs):
super(UscrnObsIO, self).__init__(**kwargs)
def _read_stns(self):
stns = pd.read_table(open_remote_file('https://www1.ncdc.noaa.gov/pub/data/uscrn/products/stations.tsv'))
stns = stns.rename(columns={a[0]:a[1] for a in zip(stns.columns,stns.columns.str.lower())})
stns['station_id'] = stns.state + "_" + stns.location + '_' + stns.vector
stns['station_id'] = stns['station_id'].str.replace(' ', '_')
stns = stns.rename(columns={'name':'station_name'})
stns = stns.set_index('station_id', drop=False)
stns['provider'] = 'USCRN'
stns['sub_provider'] = ''
stns['end_date'] = pd.to_datetime(stns.closing)
stns['end_date'] = stns.end_date.fillna(pd.Timestamp.now())
stns['commissioning'] = pd.to_datetime(stns.commissioning)
# For now, only return stations that are commissioned
stns = stns[stns.status=='Commissioned'].copy()
if self.bbox is not None:
mask_bnds = ((stns.latitude >= self.bbox.south) &
(stns.latitude <= self.bbox.north) &
(stns.longitude >= self.bbox.west) &
(stns.longitude <= self.bbox.east))
stns = stns[mask_bnds].copy()
if self.has_start_end_dates:
mask_por = (((self.start_date <= stns.commissioning) &
(stns.commissioning <= self.end_date)) |
((stns.commissioning <= self.start_date) &
(self.start_date <= stns.end_date)))
stns = stns[mask_por].copy()
return stns
def _read_obs(self, stns_ids=None):
if stns_ids is None:
stns_obs = self.stns
else:
stns_obs = self.stns.loc[stns_ids]
if self.has_start_end_dates:
yrs = np.arange(self.start_date.year, self.end_date.year+1)
else:
yrs = np.arange(2000, pd.Timestamp.now().year+1)
obs_all = []
for a_id in stns_obs.station_id:
yrs_stn = np.arange(stns_obs.loc[a_id].commissioning.year,
stns_obs.loc[a_id].end_date.year+1)
yrs_stn = yrs_stn[np.in1d(yrs_stn, yrs)]
obs_stn = []
for yr in yrs_stn:
print(a_id,yr)
url = ('https://www1.ncdc.noaa.gov/pub/data/uscrn/products/'
'hourly02/%d/CRNH0203-%d-%s.txt')%(yr,yr,a_id)
abuf = open_remote_file(url)
obs_stn.append(_parse_uscrn_obs(abuf,a_id))
obs_stn = pd.concat(obs_stn, ignore_index=True)
obs_all.append(obs_stn)
obs_all = pd.concat(obs_all, ignore_index=True)
obs_all = obs_all.set_index(['station_id', 'elem', 'time'])
obs_all = obs_all.sortlevel(0, sort_remaining=True)
return obs_all
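# --- Editor's note: hedged usage sketch, not part of the original module ---
# Assuming the ObsIO base class (not shown here) accepts bbox/start_date/
# end_date keyword arguments, typical use might look like:
#
#     io = UscrnObsIO(start_date=pd.Timestamp('2015-01-01'),
#                     end_date=pd.Timestamp('2015-12-31'))
#     stns = io._read_stns()                    # station metadata DataFrame
#     obs = io._read_obs(stns.station_id[:2])   # hourly prcp for two stations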
| gpl-3.0 |
| ZENGXH/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 |
"""
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are near one another often share the same leaf of a tree and
therefore share large parts of their hashed representation. This makes it
possible to separate two concentric circles simply from the principal
components of the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with that of an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
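# Editor's note (hedged): X_transformed is a sparse binary matrix with one
# column per leaf across the 10 trees; every row has exactly 10 non-zero
# entries (one leaf per tree), e.g.
#     X_transformed.shape          # (100, n_total_leaves)
#     X_transformed[0].nonzero()   # the leaves that sample 0 falls into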
# Visualize result using PCA
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
| bikong2/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 |
"""
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_,ms2.cluster_centers_)
assert_array_equal(ms1.labels_,ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
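# --- Editor's note (hedged sketch) ---
# The library usage exercised by these tests boils down to:
#     bandwidth = estimate_bandwidth(X, n_samples=200)
#     labels = MeanShift(bandwidth=bandwidth).fit(X).labels_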
| bsd-3-clause |
| ky822/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 |
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
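# --- Editor's note (hedged sketch) ---
# The pattern these tests exercise, in isolation:
#     Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=0)   # Y: (10, 64)
#     codes = SparsePCA(n_components=3, random_state=0).fit_transform(Y)  # (10, 3)
#     comps = SparsePCA(n_components=3, random_state=0).fit(Y).components_  # (3, 64)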
| bsd-3-clause |
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/svm/plot_svm_scale_c.py | 1 | 5426 |
"""
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>` to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function depends on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grows.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation error correlates best with
the test error when scaling our `C` with the number of samples, `n`,
as can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is that the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import LinearSVC
from sklearn.utils import check_random_state
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]  # integer division keeps the shape argument an int
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size, n_iter=250,
random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| mit |
| chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 |
"""
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
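# --- Editor's note: hedged usage sketch, not part of the original module ---
#     sm = ScalarMappable(norm=colors.Normalize(vmin=0., vmax=1.),
#                         cmap=get_cmap('jet'))
#     sm.set_array(np.linspace(0., 1., 5))
#     rgba = sm.to_rgba(np.linspace(0., 1., 5))   # -> (5, 4) RGBA floats in [0, 1]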
| agpl-3.0 |
| IshankGulati/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 26 | 10161 |
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def check_results(kernel, h, atol, rtol, breadth_first, bt, Y, dens_true):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first, bt, Y, dens_true)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
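# --- Editor's note (hedged sketch) ---
# Minimal direct use of the structure under test:
#     X = np.random.random((40, DIMENSION))
#     bt = BallTree(X, leaf_size=2)
#     dist, ind = bt.query(X[:5], k=3)   # 3 nearest neighbours of the first 5 points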
| bsd-3-clause |
| YinongLong/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 |
""" test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
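# --- Editor's note (hedged sketch) ---
# In these tests a label of -1 marks an unlabelled sample; after fitting, the
# model fills the gap in:
#     clf = label_propagation.LabelSpreading(kernel='rbf').fit(samples, labels)
#     clf.transduction_   # inferred label for every sample, -1 entries included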
| bsd-3-clause |
| jadnohra/connect | main_qt.py | 1 | 3903 |
import argparse
from PyQt5.QtWidgets import QApplication, QTreeWidget, QTreeWidgetItem
from core.core import load_db
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtCore import Qt
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
class MathTextLabel(QTreeWidgetItem):
def __init__(self, mathText, parent=None, **kwargs):
super(QTreeWidgetItem, self).__init__(parent, **kwargs)
l=QVBoxLayout(self)
l.setContentsMargins(0,0,0,0)
r,g,b,a=self.palette().base().color().getRgbF()
self._figure=Figure(edgecolor=(r,g,b), facecolor=(r,g,b))
self._canvas=FigureCanvas(self._figure)
l.addWidget(self._canvas)
self._figure.clear()
text=self._figure.suptitle(
mathText,
x=0.0,
y=1.0,
horizontalalignment='left',
verticalalignment='top',
size=QtGui.QFont().pointSize()*2
)
self._canvas.draw()
(x0,y0),(x1,y1)=text.get_window_extent().get_points()
w=x1-x0; h=y1-y0
self._figure.set_size_inches(w/80, h/80)
self.setFixedSize(w,h)
class ViewTree(QTreeWidget):
def __init__(self, value):
super().__init__()
def fill_item(item, value):
def new_item(parent, text, val=None):
if val is not None or True:
child = QTreeWidgetItem([text])
else:
child = MathTextLabel(text)
# https://www.codecogs.com/latex/integration/htmlequations.php
# http://www.holoborodko.com/pavel/quicklatex/
fill_item(child, val)
parent.addChild(child)
child.setExpanded(True)
if value is None: return
elif isinstance(value, dict):
for key, val in sorted(value.items()):
new_item(item, str(key), val)
elif isinstance(value, (list, tuple)):
for val in value:
text = (str(val) if not isinstance(val, (dict, list, tuple))
else '[%s]' % type(val).__name__)
new_item(item, text, val)
else:
new_item(item, str(value))
fill_item(self.invisibleRootItem(), value)
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--file', help='The connect .yaml file')
parser.add_argument('--no_nltk', help='Disable nltk', action='store_true')
args = parser.parse_args()
data_source = args.file if args.file else './data/math/.'
db = load_db(data_source)
app = QApplication([])
# Now use a palette to switch to dark colors:
app.setStyle("Fusion")
from PyQt5.QtGui import QPalette, QColor
from PyQt5.QtCore import Qt
palette = QPalette()
palette.setColor(QPalette.Window, QColor(53, 53, 53))
palette.setColor(QPalette.WindowText, Qt.white)
palette.setColor(QPalette.Base, QColor(25, 25, 25))
palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))
palette.setColor(QPalette.ToolTipBase, Qt.white)
palette.setColor(QPalette.ToolTipText, Qt.white)
palette.setColor(QPalette.Text, Qt.white)
palette.setColor(QPalette.Button, QColor(53, 53, 53))
palette.setColor(QPalette.ButtonText, Qt.white)
palette.setColor(QPalette.BrightText, Qt.red)
palette.setColor(QPalette.Link, QColor(42, 130, 218))
palette.setColor(QPalette.Highlight, QColor(42, 130, 218))
palette.setColor(QPalette.HighlightedText, Qt.black)
app.setPalette(palette)
window = ViewTree(db)
window.setWindowTitle('connect')
window.show()
app.exec_()
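# --- Editor's note (hedged) ---
# ViewTree renders any nested dict/list structure, independent of load_db, e.g.
#     demo = ViewTree({'group': {'items': [1, 2, 3]}})
#     demo.show()
# (a QApplication must already exist, as it does above).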
| unlicense |