Dataset columns:
    repo_name : string (length 6–112)
    path      : string (length 4–204)
    copies    : string (length 1–3)
    size      : string (length 4–6)
    content   : string (length 714–810k)
    license   : string (15 classes)
mne-tools/mne-tools.github.io
0.21/_downloads/5925ef7bd5aa0a449a4da84bf3516e5a/plot_mne_inverse_label_connectivity.py
13
7516
""" ========================================================================= Compute source space connectivity and visualize it using a circular graph ========================================================================= This example computes the all-to-all connectivity between 68 regions in source space based on dSPM inverse solutions and a FreeSurfer cortical parcellation. The connectivity is visualized using a circular graph which is ordered based on the locations of the regions in the axial plane. """ # Authors: Martin Luessi <[email protected]> # Alexandre Gramfort <[email protected]> # Nicolas P. Rougier (graph code borrowed from his matplotlib gallery) # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator from mne.connectivity import spectral_connectivity from mne.viz import circular_layout, plot_connectivity_circle print(__doc__) ############################################################################### # Load our data # ------------- # # First we'll load the data we'll use in connectivity estimation. We'll use # the sample MEG data provided with MNE. data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Load data inverse_operator = read_inverse_operator(fname_inv) raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, exclude='bads') # Define epochs for left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) ############################################################################### # Compute inverse solutions and their connectivity # ------------------------------------------------ # # Next, we need to compute the inverse solution for this data. This will return # the sources / source activity that we'll use in computing connectivity. We'll # compute the connectivity in the alpha band of these sources. We can specify # particular frequencies to include in the connectivity with the ``fmin`` and # ``fmax`` flags. Notice from the status messages how mne-python: # # 1. reads an epoch from the raw file # 2. applies SSP and baseline correction # 3. computes the inverse to obtain a source estimate # 4. averages the source estimate to obtain a time series for each label # 5. includes the label time series in the connectivity computation # 6. moves to the next epoch. # # This behaviour is because we are using generators. Since we only need to # operate on the data one epoch at a time, using a generator allows us to # compute connectivity in a computationally efficient manner where the amount # of memory (RAM) needed is independent from the number of epochs. # Compute inverse solution and for each epoch. By using "return_generator=True" # stcs will be a generator object instead of a list. 
snr = 1.0 # use lower SNR for single epochs lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", return_generator=True) # Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi labels = mne.read_labels_from_annot('sample', parc='aparc', subjects_dir=subjects_dir) label_colors = [label.color for label in labels] # Average the source estimates within each label using sign-flips to reduce # signal cancellations, also here we return a generator src = inverse_operator['src'] label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip', return_generator=True) fmin = 8. fmax = 13. sfreq = raw.info['sfreq'] # the sampling frequency con_methods = ['pli', 'wpli2_debiased', 'ciplv'] con, freqs, times, n_epochs, n_tapers = spectral_connectivity( label_ts, method=con_methods, mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, mt_adaptive=True, n_jobs=1) # con is a 3D array, get the connectivity for the first (and only) freq. band # for each method con_res = dict() for method, c in zip(con_methods, con): con_res[method] = c[:, :, 0] ############################################################################### # Make a connectivity plot # ------------------------ # # Now, we visualize this connectivity using a circular graph layout. # First, we reorder the labels based on their location in the left hemi label_names = [label.name for label in labels] lh_labels = [name for name in label_names if name.endswith('lh')] # Get the y-location of the label label_ypos = list() for name in lh_labels: idx = label_names.index(name) ypos = np.mean(labels[idx].pos[:, 1]) label_ypos.append(ypos) # Reorder the labels based on their location lh_labels = [label for (yp, label) in sorted(zip(label_ypos, lh_labels))] # For the right hemi rh_labels = [label[:-2] + 'rh' for label in lh_labels] # Save the plot order and create a circular layout node_order = list() node_order.extend(lh_labels[::-1]) # reverse the order node_order.extend(rh_labels) node_angles = circular_layout(label_names, node_order, start_pos=90, group_boundaries=[0, len(label_names) / 2]) # Plot the graph using node colors from the FreeSurfer parcellation. We only # show the 300 strongest connections. plot_connectivity_circle(con_res['pli'], label_names, n_lines=300, node_angles=node_angles, node_colors=label_colors, title='All-to-All Connectivity left-Auditory ' 'Condition (PLI)') ############################################################################### # Make two connectivity plots in the same figure # ---------------------------------------------- # # We can also assign these connectivity plots to axes in a figure. Below we'll # show the connectivity plot using two different connectivity methods. fig = plt.figure(num=None, figsize=(8, 4), facecolor='black') no_names = [''] * len(label_names) for ii, method in enumerate(con_methods): plot_connectivity_circle(con_res[method], no_names, n_lines=300, node_angles=node_angles, node_colors=label_colors, title=method, padding=0, fontsize_colorbar=6, fig=fig, subplot=(1, 3, ii + 1)) plt.show() ############################################################################### # Save the figure (optional) # -------------------------- # # By default matplotlib does not save using the facecolor, even though this was # set when the figure was generated. 
If not set via savefig, the labels, title, # and legend will be cut off from the output png file. # fname_fig = data_path + '/MEG/sample/plot_inverse_connect.png' # fig.savefig(fname_fig, facecolor='black')
bsd-3-clause
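The tutorial above relies on `return_generator=True` so that only one epoch's source estimate is held in memory at a time. Below is a minimal, library-free sketch of that generator-pipeline idea; the stage functions are placeholders standing in for the inverse, label-averaging, and connectivity steps, not MNE APIs.

# Minimal sketch of the generator pipeline pattern used above: each stage
# yields one item at a time, so peak memory does not grow with the number
# of epochs. The stage functions are placeholders, not MNE APIs.
def epochs_source():            # stands in for reading epochs from disk
    for i in range(1000):
        yield [i] * 10          # one "epoch" of data

def apply_inverse(epochs):      # stands in for apply_inverse_epochs(...)
    for ep in epochs:
        yield [2 * x for x in ep]

def label_means(stcs):          # stands in for extract_label_time_course(...)
    for stc in stcs:
        yield sum(stc) / len(stc)

# Only the final reduction materializes a result; intermediate values are
# consumed one epoch at a time.
print(sum(label_means(apply_inverse(epochs_source()))))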
benanne/kaggle-galaxies
predict_augmented_npy_maxout2048_extradense_pysexgen1_dup2.py
7
9737
""" Load an analysis file and redo the predictions on the validation set / test set, this time with augmented data and averaging. Store them as numpy files. """ import numpy as np # import pandas as pd import theano import theano.tensor as T import layers import cc_layers import custom import load_data import realtime_augmentation as ra import time import csv import os import cPickle as pickle BATCH_SIZE = 32 # 16 NUM_INPUT_FEATURES = 3 CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size # ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl" ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_pysexgen1_dup2.pkl" DO_VALID = True # disable this to not bother with the validation set evaluation DO_TEST = True # disable this to not generate predictions on the testset target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz") target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename) target_path_test = os.path.join("predictions/final/augmented/test", target_filename) print "Loading model data etc." analysis = np.load(ANALYSIS_PATH) input_sizes = [(69, 69), (69, 69)] ds_transforms = [ ra.build_ds_transform(3.0, target_size=input_sizes[0]), ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)] num_input_representations = len(ds_transforms) # split training data into training + a small validation set num_train = load_data.num_train num_valid = num_train // 10 # integer division num_train -= num_valid num_test = load_data.num_test valid_ids = load_data.train_ids[num_train:] train_ids = load_data.train_ids[:num_train] test_ids = load_data.test_ids train_indices = np.arange(num_train) valid_indices = np.arange(num_train, num_train+num_valid) test_indices = np.arange(num_test) y_valid = np.load("data/solutions_train.npy")[num_train:] print "Build model" l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1]) l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1]) l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True) l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r) l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2) l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2) l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True) l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2) l3s = cc_layers.ShuffleC01BToBC01Layer(l3) j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity) l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape') l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, 
nonlinearity=layers.identity) l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape') # l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity) l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity) # l6 = layers.OutputLayer(l5, error_measure='mse') l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.) xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)] idx = T.lscalar('idx') givens = { l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE], l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE], } compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens) print "Load model parameters" layers.set_param_values(l6, analysis['param_values']) print "Create generators" # set here which transforms to use to make predictions augmentation_transforms = [] for zoom in [1 / 1.2, 1.0, 1.2]: for angle in np.linspace(0, 360, 10, endpoint=False): augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom)) augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped print " %d augmentation transforms." % len(augmentation_transforms) augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling) valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1) augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexGen1CenteringRescaling) test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1) approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE))) approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE))) print "Approximately %d chunks for the validation set" % approx_num_chunks_valid print "Approximately %d chunks for the test set" % approx_num_chunks_test if DO_VALID: print print "VALIDATION SET" print "Compute predictions" predictions_list = [] start_time = time.time() for e, (chunk_data, chunk_length) in enumerate(valid_gen): print "Chunk %d" % (e + 1) xs_chunk = chunk_data # need to transpose the chunks to move the 'channels' dimension up xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] print " load data onto GPU" for x_shared, x_chunk in zip(xs_shared, xs_chunk): x_shared.set_value(x_chunk) num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # make predictions, don't forget to cute off the zeros at the end predictions_chunk_list = [] for b in xrange(num_batches_chunk): if b % 1000 == 0: print " batch %d/%d" % (b + 1, num_batches_chunk) predictions = compute_output(b) predictions_chunk_list.append(predictions) predictions_chunk = np.vstack(predictions_chunk_list) predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding print " 
compute average over transforms" predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1) predictions_list.append(predictions_chunk_avg) time_since_start = time.time() - start_time print " %s since start" % load_data.hms(time_since_start) all_predictions = np.vstack(predictions_list) print "Write predictions to %s" % target_path_valid load_data.save_gz(target_path_valid, all_predictions) print "Evaluate" rmse_valid = analysis['losses_valid'][-1] rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2)) print " MSE (last iteration):\t%.6f" % rmse_valid print " MSE (augmented):\t%.6f" % rmse_augmented if DO_TEST: print print "TEST SET" print "Compute predictions" predictions_list = [] start_time = time.time() for e, (chunk_data, chunk_length) in enumerate(test_gen): print "Chunk %d" % (e + 1) xs_chunk = chunk_data # need to transpose the chunks to move the 'channels' dimension up xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] print " load data onto GPU" for x_shared, x_chunk in zip(xs_shared, xs_chunk): x_shared.set_value(x_chunk) num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # make predictions, don't forget to cute off the zeros at the end predictions_chunk_list = [] for b in xrange(num_batches_chunk): if b % 1000 == 0: print " batch %d/%d" % (b + 1, num_batches_chunk) predictions = compute_output(b) predictions_chunk_list.append(predictions) predictions_chunk = np.vstack(predictions_chunk_list) predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding print " compute average over transforms" predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1) predictions_list.append(predictions_chunk_avg) time_since_start = time.time() - start_time print " %s since start" % load_data.hms(time_since_start) all_predictions = np.vstack(predictions_list) print "Write predictions to %s" % target_path_test load_data.save_gz(target_path_test, all_predictions) print "Done!"
bsd-3-clause
Jackson-Y/Machine-Learning
recommendation/wide_n_deep.py
1
7548
# -*- coding: utf-8 -*-
"""
Wide & Deep Learning: a wide & deep model built on the TensorFlow platform.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys
import tempfile

import pandas as pd
from six.moves import urllib
import tensorflow as tf

CSV_COLUMNS = [
    "age", "workclass", "fnlwgt", "education", "education_num",
    "marital_status", "occupation", "relationship", "race", "gender",
    "capital_gain", "capital_loss", "hours_per_week", "native_country",
    "income_bracket"
]

gender = tf.feature_column.categorical_column_with_vocabulary_list(
    "gender", ["Female", "Male"])
education = tf.feature_column.categorical_column_with_vocabulary_list(
    "education", [
        "Bachelors", "HS-grad", "11th", "Masters", "9th",
        "Some-college", "Assoc-acdm", "Assoc-voc", "7th-8th",
        "Doctorate", "Prof-school", "5th-6th", "10th", "1st-4th",
        "Preschool", "12th"
    ])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
    "marital_status", [
        "Married-civ-spouse", "Divorced", "Married-spouse-absent",
        "Never-married", "Separated", "Married-AF-spouse", "Widowed"
    ])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
    "relationship", [
        "Husband", "Not-in-family", "Wife", "Own-child", "Unmarried",
        "Other-relative"
    ])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
    "workclass", [
        "Self-emp-not-inc", "Private", "State-gov", "Federal-gov",
        "Local-gov", "?", "Self-emp-inc", "Without-pay", "Never-worked"
    ])

# To show an example of hashing
occupation = tf.feature_column.categorical_column_with_hash_bucket(
    "occupation", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket(
    "native_country", hash_bucket_size=1000)

# Continuous base columns
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")

# Transformations
age_buckets = tf.feature_column.bucketized_column(
    age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])

# Wide columns and deep columns.
base_columns = [
    gender, education, marital_status, relationship, workclass, occupation,
    native_country, age_buckets,
]

crossed_columns = [
    tf.feature_column.crossed_column(
        ["education", "occupation"], hash_bucket_size=1000),
    tf.feature_column.crossed_column(
        [age_buckets, "education", "occupation"], hash_bucket_size=1000),
    tf.feature_column.crossed_column(
        ["native_country", "occupation"], hash_bucket_size=1000)
]

deep_columns = [
    tf.feature_column.indicator_column(workclass),
    tf.feature_column.indicator_column(education),
    tf.feature_column.indicator_column(gender),
    tf.feature_column.indicator_column(relationship),
    # To show an example of embedding
    tf.feature_column.embedding_column(native_country, dimension=8),
    tf.feature_column.embedding_column(occupation, dimension=8),
    age,
    education_num,
    capital_gain,
    capital_loss,
    hours_per_week,
]


def maybe_download(train_data, test_data):
    """Maybe downloads training data and returns train and test file names."""
    if train_data:
        train_file_name = train_data
    else:
        train_file = tempfile.NamedTemporaryFile(delete=False)
        urllib.request.urlretrieve(
            "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
            train_file.name)  # pylint: disable=line-too-long
        train_file_name = train_file.name
        train_file.close()
        print("Training data is downloaded to %s" % train_file_name)

    if test_data:
        test_file_name = test_data
    else:
        test_file = tempfile.NamedTemporaryFile(delete=False)
        urllib.request.urlretrieve(
            "https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
            test_file.name)  # pylint: disable=line-too-long
        test_file_name = test_file.name
        test_file.close()
        print("Test data is downloaded to %s" % test_file_name)

    return train_file_name, test_file_name


def build_estimator(model_dir, model_type):
    """Build an estimator."""
    if model_type == "wide":
        m = tf.estimator.LinearClassifier(
            model_dir=model_dir,
            feature_columns=base_columns + crossed_columns)
    elif model_type == "deep":
        m = tf.estimator.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=[100, 50])
    else:
        m = tf.estimator.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=crossed_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[100, 50])
    return m


def input_fn(data_file, num_epochs, shuffle):
    """Input builder function."""
    df_data = pd.read_csv(
        tf.gfile.Open(data_file),
        names=CSV_COLUMNS,
        skipinitialspace=True,
        engine="python",
        skiprows=1)
    # remove NaN elements
    df_data = df_data.dropna(how="any", axis=0)
    labels = df_data["income_bracket"].apply(lambda x: ">50k" in x).astype(int)
    return tf.estimator.inputs.pandas_input_fn(
        x=df_data,
        y=labels,
        batch_size=100,
        num_epochs=num_epochs,
        shuffle=shuffle,
        num_threads=5)


def train_and_eval(model_dir, model_type, train_steps, train_data, test_data):
    """Train and evaluate the model."""
    train_file_name, test_file_name = maybe_download(train_data, test_data)
    model_dir = tempfile.mkdtemp() if not model_dir else model_dir

    m = build_estimator(model_dir, model_type)
    m.train(
        input_fn=input_fn(train_file_name, num_epochs=None, shuffle=True),
        steps=train_steps)
    results = m.evaluate(
        input_fn=input_fn(test_file_name, num_epochs=1, shuffle=False),
        steps=None)

    print("model directory = %s" % model_dir)
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))


FLAGS = None


def main(_):
    train_and_eval(FLAGS.model_dir, FLAGS.model_type, FLAGS.train_steps,
                   FLAGS.train_data, FLAGS.test_data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.register("type", "bool", lambda v: v.lower() == "true")
    parser.add_argument(
        "--model_dir",
        type=str,
        default="./model/",
        help="Base directory for output models.")
    parser.add_argument(
        "--model_type",
        type=str,
        default="wide_n_deep",
        help="Valid model types: {'wide', 'deep', 'wide_n_deep'}.")
    parser.add_argument(
        "--train_steps",
        type=int,
        default=1000,
        help="Number of training steps.")
    parser.add_argument(
        "--train_data",
        type=str,
        default="../exercise/ADULT_data/adult.train",
        help="Path to the training data.")
    parser.add_argument(
        "--test_data",
        type=str,
        default="../exercise/ADULT_data/adult.test",
        help="Path to the test data.")
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
mit
q1ang/scikit-learn
sklearn/metrics/tests/test_regression.py
272
6066
from __future__ import division, print_function

import numpy as np
from itertools import product

from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal

from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score

from sklearn.metrics.regression import _check_reg_targets


def test_regression_metrics(n_samples=50):
    y_true = np.arange(n_samples)
    y_pred = y_true + 1

    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)


def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)


def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)


def test__check_reg_targets():
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]

    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)


def test_regression_multioutput_array():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')

    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]] * 4
    y_pred = [[1, 1]] * 4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)

    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                 multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)

    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                 multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))


def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])

    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)
bsd-3-clause
carrillo/scikit-learn
sklearn/feature_extraction/hashing.py
183
6155
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause

import numbers

import numpy as np
import scipy.sparse as sp

from . import _hashing
from ..base import BaseEstimator, TransformerMixin


def _iteritems(d):
    """Like d.iteritems, but accepts any collections.Mapping."""
    return d.iteritems() if hasattr(d, "iteritems") else d.items()


class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.

    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.

    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    Feature values must be (finite) numbers.

    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.

    Read more in the :ref:`User Guide <feature_hashing>`.

    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.

    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat': 2, 'elephant': 4}, {'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])

    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """

    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        self._validate_params(n_features, input_type)

        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative

    @staticmethod
    def _validate_params(n_features, input_type):
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            raise ValueError("Invalid number of features (%d)." % n_features)

        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)

    def fit(self, X=None, y=None):
        """No-op.

        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.

        Returns
        -------
        self : FeatureHasher
        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self

    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.

        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be an iterable (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        y : (ignored)

        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.
        """
        raw_X = iter(raw_X)
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        n_samples = indptr.shape[0] - 1

        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")

        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices

        if self.non_negative:
            np.abs(X.data, X.data)
        return X
bsd-3-clause
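The docstring above lists three `input_type` modes. A small usage sketch of the "string" and "pair" forms (assuming scikit-learn is installed; the toy tokens are made up for illustration):

# Toy illustration of the input_type modes described in the docstring above.
from sklearn.feature_extraction import FeatureHasher

# "string": each sample is an iterable of feature names, each with implied value 1
h_str = FeatureHasher(n_features=8, input_type="string")
X_str = h_str.transform([["dog", "cat", "dog"], ["run"]])
print(X_str.shape)       # (2, 8), sparse CSR matrix

# "pair": each sample is an iterable of (feature_name, value) pairs
h_pair = FeatureHasher(n_features=8, input_type="pair")
X_pair = h_pair.transform([[("dog", 1), ("cat", 2)], [("run", 5)]])
print(X_pair.toarray())  # signed values; the sign comes from the hash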
allisony/pyspeckit
examples/h2co_mm_example_despotic.py
6
2746
import pyspeckit as psk
from pyspeckit.spectrum import models
from astropy.table import Table
from spectral_cube import SpectralCube
import numpy as np
import matplotlib.pyplot as plt
import despotic
import pyspeckit.spectrum.readers.read_class
import os
import shutil

if not os.path.exists('ph2cogrid.fits'):
    if not os.path.exists('protostellarCore.desp'):
        despotic_install_path = (os.path.split(despotic.__file__))[0]
        shutil.copy(despotic_install_path + '/cloudfiles/protostellarCore.desp',
                    os.getcwd())
    models.formaldehyde_mm.build_despotic_grids(gridfile='ph2cogrid.fits',
                                                DvUpper=10)

t = Table.read('ph2cogrid.fits')

# This returns interpolating functions that take physical parameters
# and returns values for Tex, Tau for the three mm transitions.
f1, f2, f3 = models.formaldehyde_mm.formaldehyde_mm_despotic_functions(t)

# Instantiate that fitter!
formaldehyde_fitter = models.model.SpectralModel(
    models.formaldehyde_mm.formaldehyde_mm_despotic, 5,
    parnames=['temperature', 'column', 'density', 'center', 'width'],
    parvalues=[50, 12, 5.0, 0, 2],
    parlimited=[(True, True), (True, True), (True, True), (False, False),
                (True, False)],
    parlimits=[(5, 205), (10, 17), (2, 7), (0, 0), (0, 0)],
    parsteps=[0.01, 0.01, 0.1, 0, 0],
    fitunits='Hz',
    h2co_303_202=f1,  # interpolation of (Tex, tau)
    h2co_322_221=f2,
    h2co_321_220=f3,
    shortvarnames=("T", "N", "n", "v", "\\sigma"))

sp = pyspeckit.readers.read_class.class_to_spectra('example_h2co_mm_spectrum.apex')
sp.data *= 1 / 0.75  # T_A* -> T_MB
sp.unit = "$T_{MB}$"

# estimate the error from the data
# sp.error[:] = sp.stats((2.183e2, 2.184e2))['std']

sp.Registry.add_fitter('formaldehyde_mm_despotic', formaldehyde_fitter, 5)

# plot fit for all 3 ('both')
sp.plotter(figure=1)
sp.specfit(fittype='formaldehyde_mm_despotic',
           guesses=[95, 14.5, 4, 0.0, 4.0],
           limits=[(10, 300), (11, 15), (2, 7), (-20, 150), (1, 10)],
           limited=[(True, True)] * 5,
           fixed=[False, False, True, False, False])
sp.plotter.savefig('test_fitting_figure_01.png')
mit
probml/pyprobml
scripts/correlation2d.py
1
3494
import numpy as np
from numpy import linalg as la
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider

'''
https://en.wikipedia.org/wiki/Correlation_and_dependence#/media/File:Correlation_examples2.svg

corr(X, Y) == 1 iff y = a * x + b

The following code demonstrates the above linked image that is included in
ML 1st Edition. There are two sliders in the UI that allow a user to change
the rotation and aspect ratio of the 2D point cloud. The corr(X, Y) value is
also displayed. A line fitted by linear regression on the 2D point cloud is
also shown.

The SSE of linear regression satisfies:
    SSE / cov(Y, Y) = 1 - corr(X, Y)
Thus SSE == 0 iff corr(X, Y) == 1
'''

def Rotation(theta):
    '''Return a 2D rotation matrix'''
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([[c, -s],
                     [s, c]], dtype=np.float32)

def Scale(aspect):
    '''Return a 2D scale matrix'''
    a = aspect
    return np.array([[1.0, 0.0],
                     [0.0, a]], dtype=np.float32)

def GeneratePoints(n, r):
    '''Uniformly sample n 2D points in the circle with radius r'''
    result = []
    while len(result) < n:
        p = r * (2.0 * np.random.random_sample() - 1.0),\
            r * (2.0 * np.random.random_sample() - 1.0)
        if la.norm(p, 2) <= r:
            result.append(p)
    return np.array(result, dtype=np.float32)

def LinearRegressionOn2DPoints(points):
    points_T = np.transpose(points)
    X0, Y0 = points_T[0, :], points_T[1, :]
    n = len(X0)
    ones = np.ones(n)
    A = np.vstack([X0, ones]).T
    a, b = la.lstsq(A, Y0)[0]
    alpha = np.array([a, b]).T
    Y = np.dot(np.vstack((X0, np.ones(len(X0)))).T, alpha)
    return X0, Y0, Y

def Correlation2DPoints(points):
    X = points[:, 0]
    Y = points[:, 1]
    C = np.corrcoef(points[:, 0], points[:, 1])
    return C[0, 1]

def TransformPoints(params):
    R = Rotation(params["theta"])
    S = Scale(params["aspect"])
    T = np.dot(R, S)
    points = params["original_points"]
    params["points"] = np.array([np.dot(T, np.transpose(point))
                                 for point in points], dtype=np.float32)

def updateHandler(key, text, points_plot, line_plot, params):
    def update(v):
        params[key] = v
        TransformPoints(params)
        X0, Y0, Y = LinearRegressionOn2DPoints(params["points"])
        points_plot.set_xdata(X0)
        points_plot.set_ydata(Y0)
        line_plot.set_xdata(X0)
        line_plot.set_ydata(Y)
        text.set_text("Corr(x,y) %f" % Correlation2DPoints(params["points"]))
    return update

def main():
    points = GeneratePoints(1000, 4.0)
    theta = np.pi / 4.0
    aspect = 0.5
    params = {"theta": theta, "aspect": aspect,
              "original_points": points, "points": points}
    TransformPoints(params)
    X0, Y0, Y = LinearRegressionOn2DPoints(params["points"])
    points_plot, = plt.plot(X0, Y0, 'o')
    line_plot, = plt.plot(X0, Y, 'r')
    plt.axis('equal')

    # Add UI Text and Sliders
    text = plt.text(-4.5, 3.5,
                    "Corr(x,y) %f" % Correlation2DPoints(params["points"]),
                    fontsize=15)
    ax_aspect = plt.axes([0.25, 0.1, 0.65, 0.03])
    ax_theta = plt.axes([0.25, 0.15, 0.65, 0.03])
    aspect_slider = Slider(ax_aspect, 'Aspect', 0.0, 1.0, valinit=aspect)
    theta_slider = Slider(ax_theta, 'Theta', -0.5 * np.pi, 0.5 * np.pi,
                          valinit=theta)
    aspect_slider.on_changed(updateHandler("aspect", text, points_plot,
                                           line_plot, params))
    theta_slider.on_changed(updateHandler("theta", text, points_plot,
                                          line_plot, params))
    plt.show()

if __name__ == "__main__":
    main()
mit
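As a quick numeric check of the docstring's claim that corr(X, Y) == 1 exactly when y = a*x + b, a few lines using np.corrcoef (toy data, not part of the original script):

# Quick check: a perfect linear relation gives corr == 1 (up to float error),
# and adding noise pulls it below 1. Toy data only.
import numpy as np

rng = np.random.RandomState(0)
x = rng.uniform(-4.0, 4.0, size=1000)

y_linear = 3.0 * x + 2.0                   # exactly y = a*x + b
y_noisy = y_linear + rng.normal(0, 2.0, x.shape)

print(np.corrcoef(x, y_linear)[0, 1])      # ~1.0
print(np.corrcoef(x, y_noisy)[0, 1])       # strictly less than 1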
HannesHolste/littlebigo
littlebigo/analysis.py
1
2781
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt


def df_from_benchmarks(benchmarks_db, extract_input_size):
    df = pd.DataFrame(
        columns=['id', 'group_id', 'input_size', 'cpu_kernel_time_secs',
                 'cpu_user_time_secs', 'resident_set_memory_kb',
                 'wall_time_secs'])

    # Populate dataframe with timing information from database
    i = 0
    for benchmark in benchmarks_db.all():
        if benchmark['timing_result'] is not None:
            row = ([benchmark['id'],
                    benchmark['group_id'],
                    extract_input_size(benchmark['command_with_param']),
                    benchmark['timing_result']['cpu_kernel_time_secs'],
                    benchmark['timing_result']['cpu_user_time_secs'],
                    benchmark['timing_result']['resident_set_memory_kb'],
                    benchmark['timing_result']['wall_time_secs']])
            df.loc[i] = row
            i += 1

    # Unit conversions and new columns
    df['resident_set_memory_kb'] = df['resident_set_memory_kb'].astype(int)
    df['input_size'] = df['input_size'].astype(int)

    # Introduce new columns
    df['cpu_time_total_secs'] = df['cpu_user_time_secs'] + df[
        'cpu_kernel_time_secs']
    df['resident_set_memory_mb'] = (df['resident_set_memory_kb'] /
                                    1000).astype(float)

    return df


def df_from_benchmarks_aggregated(benchmarks_db, extract_input_size):
    df = df_from_benchmarks(benchmarks_db, extract_input_size)

    cols_with_errorbars = ['cpu_kernel_time_secs', 'cpu_user_time_secs',
                           'resident_set_memory_kb', 'wall_time_secs',
                           'cpu_time_total_secs', 'resident_set_memory_mb']

    # Calculate info for error bars for each col in cols_with_errorbars
    df_final = pd.DataFrame()
    for col in cols_with_errorbars:
        # calculate min and std
        df_errors = (df.groupby(['input_size'], as_index=False)[col]
                     .agg({'{}_min'.format(col): np.min,
                           '{}_std'.format(col): np.std})
                     .sort_values(by=['{}_min'.format(col)]))
        # ensure that 'input_size' column is not duplicated
        cols_to_use = df_errors.columns.difference(df_final.columns)
        df_final = pd.concat([df_final, df_errors[cols_to_use]], axis=1)

    return df_final


def generate_plot(df, y_name, y_std_name=None):
    x = df['input_size']
    y = df[y_name]
    plt.scatter(x, y)
    plt.ylim(0)
    if y_std_name is not None:
        plt.errorbar(x, y, yerr=(df[y_std_name]))
    return plt
bsd-3-clause
zhenv5/scikit-learn
examples/linear_model/plot_polynomial_interpolation.py
251
1895
#!/usr/bin/env python
"""
========================
Polynomial interpolation
========================

This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is
n_samples x n_degree+1 and has the following form:

[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
 [1, x_2, x_2 ** 2, x_2 ** 3, ...],
 ...]

Intuitively, this matrix can be interpreted as a matrix of pseudo features
(the points raised to some power). The matrix is akin to (but different from)
the matrix induced by a polynomial kernel.

This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)

# Author: Mathieu Blondel
#         Jake Vanderplas
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline


def f(x):
    """ function to approximate by polynomial interpolation"""
    return x * np.sin(x)


# generate points used to plot
x_plot = np.linspace(0, 10, 100)

# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)

# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]

plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")

for degree in [3, 4, 5]:
    model = make_pipeline(PolynomialFeatures(degree), Ridge())
    model.fit(X, y)
    y_plot = model.predict(X_plot)
    plt.plot(x_plot, y_plot, label="degree %d" % degree)

plt.legend(loc='lower left')
plt.show()
bsd-3-clause
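The docstring above describes an n_samples x (n_degree+1) Vandermonde-style matrix. A short sketch showing that PolynomialFeatures produces exactly those [1, x, x**2, x**3] columns (toy inputs, not taken from the example):

# Show the pseudo-feature matrix described in the docstring: one row per
# sample, columns [1, x, x**2, x**3] for degree 3. Toy inputs only.
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]   # shape (n_samples, 1)
V = PolynomialFeatures(degree=3).fit_transform(x)
print(V)
# [[ 1.  1.  1.  1.]
#  [ 1.  2.  4.  8.]
#  [ 1.  3.  9. 27.]]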
nico202/pyNeMo
libs/IO.py
1
13196
""" This module defines various useful functions used for Input and Output like loading the config file, checking dependencies, various conversions etc. """ #Transform input time to ms (steps): # example: 100 = 100 (ms) # example: 30s = 30000 (ms) # example: 10m = 600000 (ms) def time_to_steps(input_time): try: if 'm' in input_time: #minutes steps = float(input_time.strip('m')) * 60 * 1000 elif 's' in input_time: #Seconds steps = float(input_time.strip('s')) * 1000 else: steps = input_time except ValueError: exit("Steps parameter can contain either 'm' (minutes) or 's' (seconds)") except TypeError: return None return int(steps) #Dependency check: check if all required module are present #(without actually importing them) def dependency_check(modules): import imp for module in modules: try: imp.find_module(module) found = True except ImportError: found = False if not found: exit("Could not find required module: %s" % (module)) def load_network_file(network_file, hooks): import imp #Ultra-important. Prevent the creation of pyc files, that get imported by imp #... preventing the reload of the source, leading to wrong input -> wrong output #... in batch runs ############################### import sys sys.dont_write_bytecode = True ############################### try: config_name = try_load_vue(network_file, hooks) config = imp.load_source('*', config_name) except SyntaxError: cprint("Config file: Syntax Error", 'fail') raise except IOError: cprint("Error: network file (%s) does not exists" % (network_file), 'fail') exit() except NameError: cprint("This VUE contains undefined variables (is it for a batch?)", 'fail') raise except: cprint("DEBUG: Unknown error", 'fail') raise return config, config_name def try_load_vue(config_name, hooks=("", "")): #FIXME: relative path etc if ".vue" in config_name: import libs.VUEtoPy as VUEtoPy #TODO: verbosity fix cprint("Converting input VUE to py", 'info') VUEtoPy.VUEtoPyConverter(config_name, hooks) config_name = "".join((config_name.split(".")[:-1])) config_name += ".py" return "./" + config_name def import_network( (network_file, hooks), (use_cuda, cuda_backend_number), (disable_sensory) #FIXME: use this ): ''' Pass the network file to load_network_file, then: * Add neurons and synapses to Nemo returns [ nemo_simulation, to_save ] ''' network_config, network_name = load_network_file(network_file, hooks) import nemo nemo_net = nemo.Network() nemo_config = nemo.Configuration() nemo_select_backend(nemo_config, (use_cuda, cuda_backend_number)) #Add network neurons nemo_add_neurons(nemo_net, network_config.neurons[0]) #Add networks synapses nemo_add_synapses(nemo_net, network_config.synapses) #Create the simulation nemo_simulation = nemo.Simulation(nemo_net, nemo_config) return ([ nemo_simulation, #0 ( #1 network_config.save, #Used to coninue numeration on sensor len(network_config.neurons[0]), network_config.step_input, network_config.sensory_neurons, network_config.neurons, network_config.synapses, network_config.name ) ]), network_name def nemo_add_neurons(net, neuron_list, start_id=0): iz = net.add_neuron_type('Izhikevich') #TODO: edit conifg file to allow this? #km = net.add_neuron_type('Kuramoto') for nidx, neuron in enumerate(neuron_list): n_id = nidx + start_id a, b, c, d, s, u, v = neuron net.add_neuron(iz, n_id, a, b, c, d, s, u, v) def nemo_add_synapses(net, synaspes_list): for sidx, synapse in enumerate(synaspes_list): source, dests, synaptic_prop = synapse delay, weights, plastic = synaptic_prop try: #dests is a single value or a list? 
dests = int(dests) dests = [dests] except TypeError: #Is already a list :) pass try: #if weight length is 1, apply it to all synapses weights = int(weights) weights = [weights] except TypeError: #Already an array pass if len(weights) != len(dests): if len(weights) == 1: #List of weight matching number of outpu nurons weights = weights * len(dests) else: exit("Malformed synapse line %d" % (sidx + 1)) net.add_synapse(source, dests, delay, weights, plastic) def nemo_select_backend( nemo_config, (use_cuda, backend_number) ): if use_cuda: try: nemo_config.set_cuda_backend(backend_number) except RuntimeError: #FIXME: verbosity print "No CUDA-GPU found: using CPU instead" nemo_config.set_cpu_backend() else: nemo_config.set_cpu_backend() def saveKey(filename, values, out_dir=".", compress=True, compress_format="gzip", force_write=False): ''' Saves a dict to a file. If compress enabled, saves to gz (compress less then bz2 but faster) ''' import os.path filename = str(filename) out_dir = str(out_dir) is_folder(out_dir) output_name = out_dir + '/' + filename values = str(values) if not os.path.isfile(output_name) or force_write: try: if compress: if compress_format in ["gzip", "gz"]: import gzip with gzip.open(output_name + ".gz", 'wb') as f: f.write(values) elif compress_format in ["bzip2", "bzip", "bz2"]: import bz2 file_out = bz2.BZ2File(output_name + ".bz2", 'wb') try: file_out.write(values) finally: file_out.close() values = bz2.compress("%s" % values) else: output = open(output_name, 'w') output.write("%s" % values) output.close() except: print "Probem writing file?! DEBUG me" raise return True else: return False def hashIt(module): #We use this to check if configuration changed import hashlib #Benchmarked: hashlib.sha1, hashlib.sha256, hashlib.md5 #hashlib.sha1 is the fastest key_string = str({key: value for key, value in module.__dict__.iteritems() if not (key.startswith('__') or key.startswith('_'))}) return key_string, str(hashlib.sha1(key_string).hexdigest()) def hashDict(dictionary): import hashlib return str(hashlib.sha1(str(dictionary)).hexdigest()) def is_folder(output_dir=".store"): #FIXME: allow recursive creation import os.path output_dir = str(output_dir) if not os.path.isdir(output_dir): if not os.path.exists(output_dir): try: os.mkdir(output_dir) except: print "Unknown error creating folder" raise return True else: exit("Destination must be a folder! Check your config") else: return True def write_log(uniqueId, output_dir="history", name="history.log"): import time history = open(output_dir + "/" + name, 'a') #Update the history history.write("%f, %s\n" % (time.time(), uniqueId)) history.close() def write_batch_log(sessionId, cycle, output_dir="batch"): print cycle, output_dir, sessionId save = { "cycle":cycle } #Do we need something else? saveKey(str(sessionId), save, output_dir, compress=False, force_write=True) def membraneImage(values, title=False, close=True, scales=None): """ Output an image of the membrane potential from a list of Membrane values. 
zoom = (stretch_x, stretch_y) means the stretch that is applied to x and y axes """ #https://docs.python.org/2/tutorial/controlflow.html#default-argument-values if not scales: scales = [] #TODO: Add stimulation trace import matplotlib.pyplot as plt import numpy as np Vm_list = values if close: plt.clf() plt.cla() x = len(Vm_list) x = np.array(range(0, x)) fig, ax1 = plt.subplots() ax1.plot(x, np.array(Vm_list)) ax1.set_xlabel('time (ms)') ax1.set_ylabel('Membrane Potential\n(mV)') if title: plt.title(title) plt.show() return plt #COLORS _NUMERALS = '0123456789abcdefABCDEF' _HEXDEC = {v: int(v, 16) for v in (x+y for x in _NUMERALS for y in _NUMERALS)} LOWERCASE, UPPERCASE = 'x', 'X' MAP = {0: "R", 1: "G", 2: "B"} COLORS=['k', 'm', 'r', 'y', 'w', 'g', 'c', [ 0.08, 0.80, 0.3], [ 0.68531278, 0.89077202,0.47581724], [ 0.80060385, 0.5, 0.1], [ 0.85033402,0.51683594, 0.11595041], [ 0.69312674, 0.66558017, 0.19565276], [0.09279514, 0.29094335, 0.85578709], [ 0.12722716, 0.62390063, 0.6649016 ], [ 0.91393435, 0.40349173, 0.10641532], [ 0.75184848, 0.79743901, 0.52527901], [ 0.44535098, 0.97818332, 0.03139634], [ 0.57541834, 0.22390996, 0.92280031], [ 0.22892716, 0.24278792, 0.8204559 ], [ 0.19601175, 0.79459385, 0.48783126], [ 0.10710888, 0.30693979, 0.01899005], [ 0.35588286, 0.18795917, 0.14780942], [ 0.80756219, 0.56990641, 0.00645912], [ 0.65984658, 0.12283337, 0.78819321], [ 0.17601197, 0.78862402, 0.78049605], [ 0.57904688, 0.31030127, 0.65775516],[ 0.25918713, 0.40173477, 0.19886916]] def rgb(triplet): return _HEXDEC[triplet[0:2]], _HEXDEC[triplet[2:4]], _HEXDEC[triplet[4:6]] def cprint(text, color="okblue", debug=False): colors = { 'okblue': '\033[94m', 'info': '\033[94m', 'okgreen': '\033[92m', 'endc': '\033[0m', 'warning': '\033[93m', 'fail': '\033[91m', 'red': '\033[91m' #=fail } if not debug:# or debug: #change if enable debug print colors[color]+text+colors['endc'] def saveFile(file_source, file_dest): ''' Copy file #TODO: Enable compression (bz2/_lzma_) ''' from shutil import copy2 from os.path import isfile output_name = file_dest if not isfile(output_name): try: copy2(file_source, file_dest) except: print "Probem copying file?! DEBUG me" raise else: return False def read_output(f, path, idx=0, total=0): import imp #Move import_istory to libs.IO? from plugins.importer import import_history from os.path import isfile, join fail = False print "[%s/%s]\tUsing file: %s" % (idx, total, f) #_input.bz2 could be used, but is more difficult to extract, and is heavier input_file = join(path, f.split("_")[1]) + "_input.py" try: input_conf = imp.load_source('*', input_file) except IOError: #File _input.py missing, save name to file, will try later? 
broken = open('ANALYSIS_FAILED.csv', 'a') broken.write("%s,%s\n" % (f.split("_")[0], f.split("_")[1])) broken.close() cprint("FAILED, SKIPPING", 'fail') fail = True except SyntaxError: broken = open('CONVERT_VUE_TO_PY.csv', 'a') broken.write("%s,%s\n" % (f.split("_")[0], f.split("_")[1])) broken.close() cprint("FAILED, SKIPPING", 'fail') fail = True input_conf = vars(input_conf) remove = ["step_input", "_S", "_N", "_stimuli", "_typicalN"] input_conf_clear = {} for var in input_conf: if var not in remove and not var.startswith("__"): input_conf_clear[var] = input_conf[var] data = import_history(join(path, f), compressed=True) #FIXME: allow uncompressed return data, input_conf_clear if not fail else False def list_all(path, start_from, end_to): from os import listdir from os.path import isfile, join files = [f for f in listdir(path) if isfile(join(path, f))] #FIXME: allow uncompressed outputs = list(set([f for f in files if "_output" in f])) #Filter out unwanted runs outputs = outputs[start_from:end_to] if end_to else outputs[start_from:] total = len(outputs) + start_from return outputs, total def ask(msg , exit_msg="Change your cli params then!" , sure="Are you sure? [y/n]" ): action = "z" while action.capitalize() not in ["Y", "N"]: action = raw_input(msg + "\n" + sure +": ") if action.capitalize() == "Y": break else: cprint(exit_msg, 'warning') exit() def reset_world(): #TODO: move to own function (like in IO.py) ''' Reset gazebo world. Replaces reset_all ''' import subprocess subprocess.call(["gz", "world", "-r"]) return True
gpl-2.0
alphaBenj/zipline
zipline/utils/calendars/exchange_calendar_tsx.py
5
2687
from datetime import time

from pandas.tseries.holiday import (
    Holiday,
    DateOffset,
    MO,
    weekend_to_monday,
    GoodFriday
)
from pytz import timezone

from zipline.utils.calendars.trading_calendar import TradingCalendar, \
    HolidayCalendar
from zipline.utils.calendars.us_holidays import Christmas
from zipline.utils.calendars.exchange_calendar_lse import (
    WeekendChristmas,
    BoxingDay,
    WeekendBoxingDay
)

# New Year's Day
TSXNewYearsDay = Holiday(
    "New Year's Day",
    month=1,
    day=1,
    observance=weekend_to_monday,
)
# Ontario Family Day
FamilyDay = Holiday(
    "Family Day",
    month=2,
    day=1,
    offset=DateOffset(weekday=MO(3)),
    start_date='2008-01-01',
)
# Victoria Day
VictoriaDay = Holiday(
    'Victoria Day',
    month=5,
    day=25,
    offset=DateOffset(weekday=MO(-1)),
)
# Canada Day
CanadaDay = Holiday(
    'Canada Day',
    month=7,
    day=1,
    observance=weekend_to_monday,
)
# Civic Holiday
CivicHoliday = Holiday(
    'Civic Holiday',
    month=8,
    day=1,
    offset=DateOffset(weekday=MO(1)),
)
# Labor Day
LaborDay = Holiday(
    'Labor Day',
    month=9,
    day=1,
    offset=DateOffset(weekday=MO(1)),
)
# Thanksgiving
Thanksgiving = Holiday(
    'Thanksgiving',
    month=10,
    day=1,
    offset=DateOffset(weekday=MO(2)),
)


class TSXExchangeCalendar(TradingCalendar):
    """
    Exchange calendar for the Toronto Stock Exchange

    Open Time: 9:30 AM, EST
    Close Time: 4:00 PM, EST

    Regularly-Observed Holidays:
    - New Years Day (observed on first business day on/after)
    - Family Day (Third Monday in February after 2008)
    - Good Friday
    - Victoria Day (Monday before May 25th)
    - Canada Day (July 1st, observed first business day after)
    - Civic Holiday (First Monday in August)
    - Labor Day (First Monday in September)
    - Thanksgiving (Second Monday in October)
    - Christmas Day
    - Dec. 27th (if Christmas is on a weekend)
    - Boxing Day
    - Dec. 28th (if Boxing Day is on a weekend)
    """

    @property
    def name(self):
        return "TSX"

    @property
    def tz(self):
        return timezone('Canada/Atlantic')

    @property
    def open_time(self):
        return time(9, 31)

    @property
    def close_time(self):
        return time(16)

    @property
    def regular_holidays(self):
        return HolidayCalendar([
            TSXNewYearsDay,
            FamilyDay,
            GoodFriday,
            VictoriaDay,
            CanadaDay,
            CivicHoliday,
            LaborDay,
            Thanksgiving,
            Christmas,
            WeekendChristmas,
            BoxingDay,
            WeekendBoxingDay
        ])
apache-2.0
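The holiday rules above are plain pandas Holiday objects; a small sketch of how one of them resolves an observed date (the year is chosen only for illustration):

# Illustrate how a pandas Holiday rule with weekend_to_monday resolves dates.
# 2011-01-01 fell on a Saturday, so the observed holiday is Monday 2011-01-03.
from pandas.tseries.holiday import Holiday, weekend_to_monday

new_years = Holiday("New Year's Day", month=1, day=1,
                    observance=weekend_to_monday)
print(new_years.dates('2011-01-01', '2011-12-31'))
# DatetimeIndex(['2011-01-03'], dtype='datetime64[ns]', freq=None)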
lbishal/scikit-learn
examples/cluster/plot_cluster_comparison.py
246
4684
""" ========================================================= Comparing different clustering algorithms on toy datasets ========================================================= This example aims at showing characteristics of different clustering algorithms on datasets that are "interesting" but still in 2D. The last dataset is an example of a 'null' situation for clustering: the data is homogeneous, and there is no good clustering. While these examples give some intuition about the algorithms, this intuition might not apply to very high dimensional data. The results could be improved by tweaking the parameters for each clustering strategy, for instance setting the number of clusters for the methods that needs this parameter specified. Note that affinity propagation has a tendency to create many clusters. Thus in this example its two parameters (damping and per-point preference) were set to to mitigate this behavior. """ print(__doc__) import time import numpy as np import matplotlib.pyplot as plt from sklearn import cluster, datasets from sklearn.neighbors import kneighbors_graph from sklearn.preprocessing import StandardScaler np.random.seed(0) # Generate datasets. We choose the size big enough to see the scalability # of the algorithms, but not too big to avoid too long running times n_samples = 1500 noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05) noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05) blobs = datasets.make_blobs(n_samples=n_samples, random_state=8) no_structure = np.random.rand(n_samples, 2), None colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk']) colors = np.hstack([colors] * 20) clustering_names = [ 'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift', 'SpectralClustering', 'Ward', 'AgglomerativeClustering', 'DBSCAN', 'Birch'] plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5)) plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05, hspace=.01) plot_num = 1 datasets = [noisy_circles, noisy_moons, blobs, no_structure] for i_dataset, dataset in enumerate(datasets): X, y = dataset # normalize dataset for easier parameter selection X = StandardScaler().fit_transform(X) # estimate bandwidth for mean shift bandwidth = cluster.estimate_bandwidth(X, quantile=0.3) # connectivity matrix for structured Ward connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False) # make connectivity symmetric connectivity = 0.5 * (connectivity + connectivity.T) # create clustering estimators ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True) two_means = cluster.MiniBatchKMeans(n_clusters=2) ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward', connectivity=connectivity) spectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity="nearest_neighbors") dbscan = cluster.DBSCAN(eps=.2) affinity_propagation = cluster.AffinityPropagation(damping=.9, preference=-200) average_linkage = cluster.AgglomerativeClustering( linkage="average", affinity="cityblock", n_clusters=2, connectivity=connectivity) birch = cluster.Birch(n_clusters=2) clustering_algorithms = [ two_means, affinity_propagation, ms, spectral, ward, average_linkage, dbscan, birch] for name, algorithm in zip(clustering_names, clustering_algorithms): # predict cluster memberships t0 = time.time() algorithm.fit(X) t1 = time.time() if hasattr(algorithm, 'labels_'): y_pred = algorithm.labels_.astype(np.int) else: y_pred = algorithm.predict(X) # plot plt.subplot(4, len(clustering_algorithms), plot_num) if 
i_dataset == 0: plt.title(name, size=18) plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10) if hasattr(algorithm, 'cluster_centers_'): centers = algorithm.cluster_centers_ center_colors = colors[:len(centers)] plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors) plt.xlim(-2, 2) plt.ylim(-2, 2) plt.xticks(()) plt.yticks(()) plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'), transform=plt.gca().transAxes, size=15, horizontalalignment='right') plot_num += 1 plt.show()
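
# --- Editor's addition (not part of the original gallery script above): a
# minimal sketch showing how a single estimator from the comparison can be
# run on one toy dataset without the plotting grid. Only objects already used
# above are needed; the particular values (300 samples, eps=.3) are
# illustrative assumptions, not tuned settings.

import numpy as np
from sklearn import cluster, datasets
from sklearn.preprocessing import StandardScaler

# Two interleaving half-moons, the same generator used in the script above.
X, _ = datasets.make_moons(n_samples=300, noise=.05)
X = StandardScaler().fit_transform(X)  # scaling eases parameter selection

# Fit DBSCAN; eps=.3 is an assumed, illustrative neighbourhood radius.
db = cluster.DBSCAN(eps=.3)
db.fit(X)

# DBSCAN marks noise points with the label -1; on this easy, low-noise
# dataset the two moons are expected to come out as two clusters.
print(np.unique(db.labels_))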
bsd-3-clause
jseabold/statsmodels
statsmodels/datasets/strikes/data.py
4
1875
"""U.S. Strike Duration Data""" from statsmodels.datasets import utils as du __docformat__ = 'restructuredtext' COPYRIGHT = """This is public domain.""" TITLE = __doc__ SOURCE = """ This is a subset of the data used in Kennan (1985). It was originally published by the Bureau of Labor Statistics. :: Kennan, J. 1985. "The duration of contract strikes in US manufacturing. `Journal of Econometrics` 28.1, 5-28. """ DESCRSHORT = """Contains data on the length of strikes in US manufacturing and unanticipated industrial production.""" DESCRLONG = """Contains data on the length of strikes in US manufacturing and unanticipated industrial production. The data is a subset of the data originally used by Kennan. The data here is data for the months of June only to avoid seasonal issues.""" #suggested notes NOTE = """:: Number of observations - 62 Number of variables - 2 Variable name definitions:: duration - duration of the strike in days iprod - unanticipated industrial production """ def load_pandas(): """ Load the strikes data and return a Dataset class instance. Returns ------- Dataset See DATASET_PROPOSAL.txt for more information. """ data = _get_data() return du.process_pandas(data, endog_idx=0) def load(as_pandas=None): """ Load the strikes data and return a Dataset class instance. Parameters ---------- as_pandas : bool Flag indicating whether to return pandas DataFrames and Series or numpy recarrays and arrays. If True, returns pandas. Returns ------- Dataset See DATASET_PROPOSAL.txt for more information. """ return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas) def _get_data(): return du.load_csv(__file__,'strikes.csv').astype(float)
bsd-3-clause
fyffyt/scikit-learn
sklearn/cross_validation.py
47
67782
""" The :mod:`sklearn.cross_validation` module includes utilities for cross- validation and performance evaluation. """ # Author: Alexandre Gramfort <[email protected]>, # Gael Varoquaux <[email protected]>, # Olivier Grisel <[email protected]> # License: BSD 3 clause from __future__ import print_function from __future__ import division import warnings from itertools import chain, combinations from math import ceil, floor, factorial import numbers import time from abc import ABCMeta, abstractmethod import numpy as np import scipy.sparse as sp from .base import is_classifier, clone from .utils import indexable, check_random_state, safe_indexing from .utils.validation import (_is_arraylike, _num_samples, check_array, column_or_1d) from .utils.multiclass import type_of_target from .externals.joblib import Parallel, delayed, logger from .externals.six import with_metaclass from .externals.six.moves import zip from .metrics.scorer import check_scoring from .utils.fixes import bincount __all__ = ['KFold', 'LabelKFold', 'LeaveOneLabelOut', 'LeaveOneOut', 'LeavePLabelOut', 'LeavePOut', 'ShuffleSplit', 'StratifiedKFold', 'StratifiedShuffleSplit', 'PredefinedSplit', 'LabelShuffleSplit', 'check_cv', 'cross_val_score', 'cross_val_predict', 'permutation_test_score', 'train_test_split'] class _PartitionIterator(with_metaclass(ABCMeta)): """Base class for CV iterators where train_mask = ~test_mask Implementations must define `_iter_test_masks` or `_iter_test_indices`. Parameters ---------- n : int Total number of elements in dataset. """ def __init__(self, n): if abs(n - int(n)) >= np.finfo('f').eps: raise ValueError("n must be an integer") self.n = int(n) def __iter__(self): ind = np.arange(self.n) for test_index in self._iter_test_masks(): train_index = np.logical_not(test_index) train_index = ind[train_index] test_index = ind[test_index] yield train_index, test_index # Since subclasses must implement either _iter_test_masks or # _iter_test_indices, neither can be abstract. def _iter_test_masks(self): """Generates boolean masks corresponding to test sets. By default, delegates to _iter_test_indices() """ for test_index in self._iter_test_indices(): test_mask = self._empty_mask() test_mask[test_index] = True yield test_mask def _iter_test_indices(self): """Generates integer indices corresponding to test sets.""" raise NotImplementedError def _empty_mask(self): return np.zeros(self.n, dtype=np.bool) class LeaveOneOut(_PartitionIterator): """Leave-One-Out cross validation iterator. Provides train/test indices to split data in train test sets. Each sample is used once as a test set (singleton) while the remaining samples form the training set. Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and ``LeavePOut(n, p=1)``. Due to the high number of test sets (which is the same as the number of samples) this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in dataset. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4]]) >>> y = np.array([1, 2]) >>> loo = cross_validation.LeaveOneOut(2) >>> len(loo) 2 >>> print(loo) sklearn.cross_validation.LeaveOneOut(n=2) >>> for train_index, test_index in loo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... 
print(X_train, X_test, y_train, y_test) TRAIN: [1] TEST: [0] [[3 4]] [[1 2]] [2] [1] TRAIN: [0] TEST: [1] [[1 2]] [[3 4]] [1] [2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def _iter_test_indices(self): return range(self.n) def __repr__(self): return '%s.%s(n=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, ) def __len__(self): return self.n class LeavePOut(_PartitionIterator): """Leave-P-Out cross validation iterator Provides train/test indices to split data in train test sets. This results in testing on all distinct samples of size p, while the remaining n - p samples form the training set in each iteration. Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)`` which creates non-overlapping test sets. Due to the high number of iterations which grows combinatorically with the number of samples this cross validation method can be very costly. For large datasets one should favor KFold, StratifiedKFold or ShuffleSplit. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in dataset. p : int Size of the test sets. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> lpo = cross_validation.LeavePOut(4, 2) >>> len(lpo) 6 >>> print(lpo) sklearn.cross_validation.LeavePOut(n=4, p=2) >>> for train_index, test_index in lpo: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [1 3] TEST: [0 2] TRAIN: [1 2] TEST: [0 3] TRAIN: [0 3] TEST: [1 2] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 1] TEST: [2 3] """ def __init__(self, n, p): super(LeavePOut, self).__init__(n) self.p = p def _iter_test_indices(self): for comb in combinations(range(self.n), self.p): yield np.array(comb) def __repr__(self): return '%s.%s(n=%i, p=%i)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.p, ) def __len__(self): return int(factorial(self.n) / factorial(self.n - self.p) / factorial(self.p)) class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)): """Base class to validate KFold approaches""" @abstractmethod def __init__(self, n, n_folds, shuffle, random_state): super(_BaseKFold, self).__init__(n) if abs(n_folds - int(n_folds)) >= np.finfo('f').eps: raise ValueError("n_folds must be an integer") self.n_folds = n_folds = int(n_folds) if n_folds <= 1: raise ValueError( "k-fold cross validation requires at least one" " train / test split by setting n_folds=2 or more," " got n_folds={0}.".format(n_folds)) if n_folds > self.n: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of samples: {1}.").format(n_folds, n)) if not isinstance(shuffle, bool): raise TypeError("shuffle must be True or False;" " got {0}".format(shuffle)) self.shuffle = shuffle self.random_state = random_state class KFold(_BaseKFold): """K-Folds cross validation iterator. Provides train/test indices to split data in train test sets. Split dataset into k consecutive folds (without shuffling by default). Each fold is then used a validation set once while the k - 1 remaining fold form the training set. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements. n_folds : int, default=3 Number of folds. Must be at least 2. 
shuffle : boolean, optional Whether to shuffle the data before splitting into batches. random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. Examples -------- >>> from sklearn.cross_validation import KFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> kf = KFold(4, n_folds=2) >>> len(kf) 2 >>> print(kf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in kf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [2 3] TEST: [0 1] TRAIN: [0 1] TEST: [2 3] Notes ----- The first n % n_folds folds have size n // n_folds + 1, other folds have size n // n_folds. See also -------- StratifiedKFold: take label information into account to avoid building folds with imbalanced class distributions (for binary or multiclass classification tasks). LabelKFold: K-fold iterator variant with non-overlapping labels. """ def __init__(self, n, n_folds=3, shuffle=False, random_state=None): super(KFold, self).__init__(n, n_folds, shuffle, random_state) self.idxs = np.arange(n) if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): n = self.n n_folds = self.n_folds fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int) fold_sizes[:n % n_folds] += 1 current = 0 for fold_size in fold_sizes: start, stop = current, current + fold_size yield self.idxs[start:stop] current = stop def __repr__(self): return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LabelKFold(_BaseKFold): """K-fold iterator variant with non-overlapping labels. The same label will not appear in two different folds (the number of distinct labels has to be at least equal to the number of folds). The folds are approximately balanced in the sense that the number of distinct labels is approximately the same in each fold. Parameters ---------- labels : array-like with shape (n_samples, ) Contains a label for each sample. The folds are built so that the same label does not appear in two different folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle the data before splitting into batches. random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. Examples -------- >>> from sklearn.cross_validation import LabelKFold >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 3, 4]) >>> labels = np.array([0, 0, 2, 2]) >>> label_kfold = LabelKFold(labels, n_folds=2) >>> len(label_kfold) 2 >>> print(label_kfold) sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2) >>> for train_index, test_index in label_kfold: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) ... 
TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [3 4] TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [3 4] [1 2] See also -------- LeaveOneLabelOut for splitting the data according to explicit, domain-specific stratification of the dataset. """ def __init__(self, labels, n_folds=3, shuffle=False, random_state=None): super(LabelKFold, self).__init__(len(labels), n_folds, shuffle, random_state) unique_labels, labels = np.unique(labels, return_inverse=True) n_labels = len(unique_labels) if n_folds > n_labels: raise ValueError( ("Cannot have number of folds n_folds={0} greater" " than the number of labels: {1}.").format(n_folds, n_labels)) # Weight labels by their number of occurences n_samples_per_label = np.bincount(labels) # Distribute the most frequent labels first indices = np.argsort(n_samples_per_label)[::-1] n_samples_per_label = n_samples_per_label[indices] # Total weight of each fold n_samples_per_fold = np.zeros(n_folds) # Mapping from label index to fold index label_to_fold = np.zeros(len(unique_labels)) # Distribute samples by adding the largest weight to the lightest fold for label_index, weight in enumerate(n_samples_per_label): lightest_fold = np.argmin(n_samples_per_fold) n_samples_per_fold[lightest_fold] += weight label_to_fold[indices[label_index]] = lightest_fold self.idxs = label_to_fold[labels] if shuffle: rng = check_random_state(self.random_state) rng.shuffle(self.idxs) def _iter_test_indices(self): for i in range(self.n_folds): yield (self.idxs == i) def __repr__(self): return '{0}.{1}(n_labels={2}, n_folds={3})'.format( self.__class__.__module__, self.__class__.__name__, self.n, self.n_folds, ) def __len__(self): return self.n_folds class StratifiedKFold(_BaseKFold): """Stratified K-Folds cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a variation of KFold that returns stratified folds. The folds are made by preserving the percentage of samples for each class. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array-like, [n_samples] Samples to split in K folds. n_folds : int, default=3 Number of folds. Must be at least 2. shuffle : boolean, optional Whether to shuffle each stratification of the data before splitting into batches. random_state : None, int or RandomState When shuffle=True, pseudo-random number generator state used for shuffling. If None, use default numpy RNG for shuffling. Examples -------- >>> from sklearn.cross_validation import StratifiedKFold >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> skf = StratifiedKFold(y, n_folds=2) >>> len(skf) 2 >>> print(skf) # doctest: +NORMALIZE_WHITESPACE sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2, shuffle=False, random_state=None) >>> for train_index, test_index in skf: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 3] TEST: [0 2] TRAIN: [0 2] TEST: [1 3] Notes ----- All the folds have size trunc(n_samples / n_folds), the last one has the complementary. See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, y, n_folds=3, shuffle=False, random_state=None): super(StratifiedKFold, self).__init__( len(y), n_folds, shuffle, random_state) y = np.asarray(y) n_samples = y.shape[0] unique_labels, y_inversed = np.unique(y, return_inverse=True) label_counts = bincount(y_inversed) min_labels = np.min(label_counts) if self.n_folds > min_labels: warnings.warn(("The least populated class in y has only %d" " members, which is too few. The minimum" " number of labels for any class cannot" " be less than n_folds=%d." % (min_labels, self.n_folds)), Warning) # don't want to use the same seed in each label's shuffle if self.shuffle: rng = check_random_state(self.random_state) else: rng = self.random_state # pre-assign each sample to a test fold index using individual KFold # splitting strategies for each label so as to respect the # balance of labels per_label_cvs = [ KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle, random_state=rng) for c in label_counts] test_folds = np.zeros(n_samples, dtype=np.int) for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)): for label, (_, test_split) in zip(unique_labels, per_label_splits): label_test_folds = test_folds[y == label] # the test split can be too big because we used # KFold(max(c, self.n_folds), self.n_folds) instead of # KFold(c, self.n_folds) to make it possible to not crash even # if the data is not 100% stratifiable for all the labels # (we use a warning instead of raising an exception) # If this is the case, let's trim it: test_split = test_split[test_split < len(label_test_folds)] label_test_folds[test_split] = test_fold_idx test_folds[y == label] = label_test_folds self.test_folds = test_folds self.y = y def _iter_test_masks(self): for i in range(self.n_folds): yield self.test_folds == i def __repr__(self): return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.y, self.n_folds, self.shuffle, self.random_state, ) def __len__(self): return self.n_folds class LeaveOneLabelOut(_PartitionIterator): """Leave-One-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) >>> y = np.array([1, 2, 1, 2]) >>> labels = np.array([1, 1, 2, 2]) >>> lol = cross_validation.LeaveOneLabelOut(labels) >>> len(lol) 2 >>> print(lol) sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2]) >>> for train_index, test_index in lol: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2 3] TEST: [0 1] [[5 6] [7 8]] [[1 2] [3 4]] [1 2] [1 2] TRAIN: [0 1] TEST: [2 3] [[1 2] [3 4]] [[5 6] [7 8]] [1 2] [1 2] See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, labels): super(LeaveOneLabelOut, self).__init__(len(labels)) # We make a copy of labels to avoid side-effects during iteration self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) def _iter_test_masks(self): for i in self.unique_labels: yield self.labels == i def __repr__(self): return '%s.%s(labels=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, ) def __len__(self): return self.n_unique_labels class LeavePLabelOut(_PartitionIterator): """Leave-P-Label_Out cross-validation iterator Provides train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LeaveOneLabelOut is that the former builds the test sets with all the samples assigned to ``p`` different values of the labels while the latter uses samples all assigned the same labels. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- labels : array-like of int with shape (n_samples,) Arbitrary domain-specific stratification of the data to be used to draw the splits. p : int Number of samples to leave out in the test split. Examples -------- >>> from sklearn import cross_validation >>> X = np.array([[1, 2], [3, 4], [5, 6]]) >>> y = np.array([1, 2, 1]) >>> labels = np.array([1, 2, 3]) >>> lpl = cross_validation.LeavePLabelOut(labels, p=2) >>> len(lpl) 3 >>> print(lpl) sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2) >>> for train_index, test_index in lpl: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] ... print(X_train, X_test, y_train, y_test) TRAIN: [2] TEST: [0 1] [[5 6]] [[1 2] [3 4]] [1] [1 2] TRAIN: [1] TEST: [0 2] [[3 4]] [[1 2] [5 6]] [2] [1 1] TRAIN: [0] TEST: [1 2] [[1 2]] [[3 4] [5 6]] [1] [2 1] See also -------- LabelKFold: K-fold iterator variant with non-overlapping labels. 
""" def __init__(self, labels, p): # We make a copy of labels to avoid side-effects during iteration super(LeavePLabelOut, self).__init__(len(labels)) self.labels = np.array(labels, copy=True) self.unique_labels = np.unique(labels) self.n_unique_labels = len(self.unique_labels) self.p = p def _iter_test_masks(self): comb = combinations(range(self.n_unique_labels), self.p) for idx in comb: test_index = self._empty_mask() idx = np.array(idx) for l in self.unique_labels[idx]: test_index[self.labels == l] = True yield test_index def __repr__(self): return '%s.%s(labels=%s, p=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.labels, self.p, ) def __len__(self): return int(factorial(self.n_unique_labels) / factorial(self.n_unique_labels - self.p) / factorial(self.p)) class BaseShuffleSplit(with_metaclass(ABCMeta)): """Base class for ShuffleSplit and StratifiedShuffleSplit""" def __init__(self, n, n_iter=10, test_size=0.1, train_size=None, random_state=None): self.n = n self.n_iter = n_iter self.test_size = test_size self.train_size = train_size self.random_state = random_state self.n_train, self.n_test = _validate_shuffle_split(n, test_size, train_size) def __iter__(self): for train, test in self._iter_indices(): yield train, test return @abstractmethod def _iter_indices(self): """Generate (train, test) indices""" class ShuffleSplit(BaseShuffleSplit): """Random permutation cross-validation iterator. Yields indices to split data into training and test sets. Note: contrary to other cross-validation strategies, random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- n : int Total number of elements in the dataset. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn import cross_validation >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... test_size=.25, random_state=0) >>> len(rs) 3 >>> print(rs) ... # doctest: +ELLIPSIS ShuffleSplit(4, n_iter=3, test_size=0.25, ...) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... TRAIN: [3 1 0] TEST: [2] TRAIN: [2 1 3] TEST: [0] TRAIN: [0 2 1] TEST: [3] >>> rs = cross_validation.ShuffleSplit(4, n_iter=3, ... train_size=0.5, test_size=.25, random_state=0) >>> for train_index, test_index in rs: ... print("TRAIN:", train_index, "TEST:", test_index) ... 
TRAIN: [3 1] TEST: [2] TRAIN: [2 1] TEST: [0] TRAIN: [0 2] TEST: [3] """ def _iter_indices(self): rng = check_random_state(self.random_state) for i in range(self.n_iter): # random partition permutation = rng.permutation(self.n) ind_test = permutation[:self.n_test] ind_train = permutation[self.n_test:self.n_test + self.n_train] yield ind_train, ind_test def __repr__(self): return ('%s(%d, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.n, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _validate_shuffle_split(n, test_size, train_size): if test_size is None and train_size is None: raise ValueError( 'test_size and train_size can not both be None') if test_size is not None: if np.asarray(test_size).dtype.kind == 'f': if test_size >= 1.: raise ValueError( 'test_size=%f should be smaller ' 'than 1.0 or be an integer' % test_size) elif np.asarray(test_size).dtype.kind == 'i': if test_size >= n: raise ValueError( 'test_size=%d should be smaller ' 'than the number of samples %d' % (test_size, n)) else: raise ValueError("Invalid value for test_size: %r" % test_size) if train_size is not None: if np.asarray(train_size).dtype.kind == 'f': if train_size >= 1.: raise ValueError("train_size=%f should be smaller " "than 1.0 or be an integer" % train_size) elif np.asarray(test_size).dtype.kind == 'f' and \ train_size + test_size > 1.: raise ValueError('The sum of test_size and train_size = %f, ' 'should be smaller than 1.0. Reduce ' 'test_size and/or train_size.' % (train_size + test_size)) elif np.asarray(train_size).dtype.kind == 'i': if train_size >= n: raise ValueError("train_size=%d should be smaller " "than the number of samples %d" % (train_size, n)) else: raise ValueError("Invalid value for train_size: %r" % train_size) if np.asarray(test_size).dtype.kind == 'f': n_test = ceil(test_size * n) elif np.asarray(test_size).dtype.kind == 'i': n_test = float(test_size) if train_size is None: n_train = n - n_test else: if np.asarray(train_size).dtype.kind == 'f': n_train = floor(train_size * n) else: n_train = float(train_size) if test_size is None: n_test = n - n_train if n_train + n_test > n: raise ValueError('The sum of train_size and test_size = %d, ' 'should be smaller than the number of ' 'samples %d. Reduce test_size and/or ' 'train_size.' % (n_train + n_test, n)) return int(n_train), int(n_test) class StratifiedShuffleSplit(BaseShuffleSplit): """Stratified ShuffleSplit cross validation iterator Provides train/test indices to split data in train test sets. This cross-validation object is a merge of StratifiedKFold and ShuffleSplit, which returns stratified randomized folds. The folds are made by preserving the percentage of samples for each class. Note: like the ShuffleSplit strategy, stratified random splits do not guarantee that all folds will be different, although this is still very likely for sizeable datasets. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- y : array, [n_samples] Labels of samples. n_iter : int (default 10) Number of re-shuffling & splitting iterations. test_size : float (default 0.1), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. 
train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. Examples -------- >>> from sklearn.cross_validation import StratifiedShuffleSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0) >>> len(sss) 3 >>> print(sss) # doctest: +ELLIPSIS StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...) >>> for train_index, test_index in sss: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2] TEST: [3 0] TRAIN: [0 2] TEST: [1 3] TRAIN: [0 2] TEST: [3 1] """ def __init__(self, y, n_iter=10, test_size=0.1, train_size=None, random_state=None): super(StratifiedShuffleSplit, self).__init__( len(y), n_iter, test_size, train_size, random_state) self.y = np.array(y) self.classes, self.y_indices = np.unique(y, return_inverse=True) n_cls = self.classes.shape[0] if np.min(bincount(self.y_indices)) < 2: raise ValueError("The least populated class in y has only 1" " member, which is too few. The minimum" " number of labels for any class cannot" " be less than 2.") if self.n_train < n_cls: raise ValueError('The train_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_train, n_cls)) if self.n_test < n_cls: raise ValueError('The test_size = %d should be greater or ' 'equal to the number of classes = %d' % (self.n_test, n_cls)) def _iter_indices(self): rng = check_random_state(self.random_state) cls_count = bincount(self.y_indices) p_i = cls_count / float(self.n) n_i = np.round(self.n_train * p_i).astype(int) t_i = np.minimum(cls_count - n_i, np.round(self.n_test * p_i).astype(int)) for n in range(self.n_iter): train = [] test = [] for i, cls in enumerate(self.classes): permutation = rng.permutation(cls_count[i]) cls_i = np.where((self.y == cls))[0][permutation] train.extend(cls_i[:n_i[i]]) test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]]) # Because of rounding issues (as n_train and n_test are not # dividers of the number of elements per class), we may end # up here with less samples in train and test than asked for. if len(train) < self.n_train or len(test) < self.n_test: # We complete by affecting randomly the missing indexes missing_idx = np.where(bincount(train + test, minlength=len(self.y)) == 0, )[0] missing_idx = rng.permutation(missing_idx) train.extend(missing_idx[:(self.n_train - len(train))]) test.extend(missing_idx[-(self.n_test - len(test)):]) train = rng.permutation(train) test = rng.permutation(test) yield train, test def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.y, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter class PredefinedSplit(_PartitionIterator): """Predefined split cross validation iterator Splits the data into training/test set folds according to a predefined scheme. Each sample can be assigned to at most one test set fold, as specified by the user through the ``test_fold`` parameter. Read more in the :ref:`User Guide <cross_validation>`. 
Parameters ---------- test_fold : "array-like, shape (n_samples,) test_fold[i] gives the test set fold of sample i. A value of -1 indicates that the corresponding sample is not part of any test set folds, but will instead always be put into the training fold. Examples -------- >>> from sklearn.cross_validation import PredefinedSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([0, 0, 1, 1]) >>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1]) >>> len(ps) 2 >>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1]) >>> for train_index, test_index in ps: ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [1 2 3] TEST: [0] TRAIN: [0 2] TEST: [1 3] """ def __init__(self, test_fold): super(PredefinedSplit, self).__init__(len(test_fold)) self.test_fold = np.array(test_fold, dtype=np.int) self.test_fold = column_or_1d(self.test_fold) self.unique_folds = np.unique(self.test_fold) self.unique_folds = self.unique_folds[self.unique_folds != -1] def _iter_test_indices(self): for f in self.unique_folds: yield np.where(self.test_fold == f)[0] def __repr__(self): return '%s.%s(test_fold=%s)' % ( self.__class__.__module__, self.__class__.__name__, self.test_fold) def __len__(self): return len(self.unique_folds) class LabelShuffleSplit(ShuffleSplit): '''Shuffle-Labels-Out cross-validation iterator Provides randomized train/test indices to split data according to a third-party provided label. This label information can be used to encode arbitrary domain specific stratifications of the samples as integers. For instance the labels could be the year of collection of the samples and thus allow for cross-validation against time-based splits. The difference between LeavePLabelOut and LabelShuffleSplit is that the former generates splits using all subsets of size ``p`` unique labels, whereas LabelShuffleSplit generates a user-determined number of random test splits, each with a user-determined fraction of unique labels. For example, a less computationally intensive alternative to ``LeavePLabelOut(labels, p=10)`` would be ``LabelShuffleSplit(labels, test_size=10, n_iter=100)``. Note: The parameters ``test_size`` and ``train_size`` refer to labels, and not to samples, as in ShuffleSplit. Parameters ---------- labels : array, [n_samples] Labels of samples n_iter : int (default 5) Number of re-shuffling & splitting iterations. test_size : float (default 0.2), int, or None If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the test split. If int, represents the absolute number of test labels. If None, the value is automatically set to the complement of the train size. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the labels to include in the train split. If int, represents the absolute number of train labels. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. 
''' def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None, random_state=None): classes, label_indices = np.unique(labels, return_inverse=True) super(LabelShuffleSplit, self).__init__( len(classes), n_iter=n_iter, test_size=test_size, train_size=train_size, random_state=random_state) self.labels = labels self.classes = classes self.label_indices = label_indices def __repr__(self): return ('%s(labels=%s, n_iter=%d, test_size=%s, ' 'random_state=%s)' % ( self.__class__.__name__, self.labels, self.n_iter, str(self.test_size), self.random_state, )) def __len__(self): return self.n_iter def _iter_indices(self): for label_train, label_test in super(LabelShuffleSplit, self)._iter_indices(): # these are the indices of classes in the partition # invert them into data indices train = np.flatnonzero(np.in1d(self.label_indices, label_train)) test = np.flatnonzero(np.in1d(self.label_indices, label_test)) yield train, test ############################################################################## def _index_param_value(X, v, indices): """Private helper function for parameter value indexing.""" if not _is_arraylike(v) or _num_samples(v) != _num_samples(X): # pass through: skip indexing return v if sp.issparse(v): v = v.tocsr() return safe_indexing(v, indices) def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Generate cross-validated estimates for each input data point Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- preds : ndarray This is the result of calling 'predict' """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. 
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y, train, test, verbose, fit_params) for train, test in cv) preds = [p for p, _ in preds_blocks] locs = np.concatenate([loc for _, loc in preds_blocks]) if not _check_is_partition(locs, _num_samples(X)): raise ValueError('cross_val_predict only works for partitions') inv_locs = np.empty(len(locs), dtype=int) inv_locs[locs] = np.arange(len(locs)) # Check for sparse predictions if sp.issparse(preds[0]): preds = sp.vstack(preds, format=preds[0].format) else: preds = np.concatenate(preds) return preds[inv_locs] def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params): """Fit estimator and predict values for a given dataset split. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. Returns ------- preds : sequence Result of calling 'estimator.predict' test : array-like This is the value of the test parameter """ # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) X_train, y_train = _safe_split(estimator, X, y, train) X_test, _ = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) preds = estimator.predict(X_test) return preds, test def _check_is_partition(locs, n): """Check whether locs is a reordering of the array np.arange(n) Parameters ---------- locs : ndarray integer array to test n : int number of expected elements Returns ------- is_partition : bool True iff sorted(locs) is range(n) """ if len(locs) != n: return False hit = np.zeros(n, bool) hit[locs] = True if not np.all(hit): return False return True def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'): """Evaluate a score by cross-validation Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. 
If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params) for train, test in cv) return np.array(scores)[:, 0] class FitFailedWarning(RuntimeWarning): pass def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, return_train_score=False, return_parameters=False, error_score='raise'): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scorer : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape (n_train_samples,) Indices of training samples. test : array-like, shape (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. error_score : 'raise' (default) or numeric Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust length of sample weights fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, _index_param_value(X, v, train)) for k, v in fit_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) try: if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) except Exception as e: if error_score == 'raise': raise elif isinstance(error_score, numbers.Number): test_score = error_score if return_train_score: train_score = error_score warnings.warn("Classifier fit failed. The score on this train-test" " partition for these parameters will be set to %f. " "Details: \n%r" % (error_score, e), FitFailedWarning) else: raise ValueError("error_score must be the string 'raise' or a" " numeric value. (Hint: if using 'raise', please" " make sure that it has been spelled correctly.)" ) else: test_score = _score(estimator, X_test, y_test, scorer) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test) else: score = scorer(estimator, X_test, y_test) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." 
% (str(score), type(score))) return score def _permutation_test_score(estimator, X, y, cv, scorer): """Auxiliary function for permutation_test_score""" avg_score = [] for train, test in cv: estimator.fit(X[train], y[train]) avg_score.append(scorer(estimator, X[test], y[test])) return np.mean(avg_score) def _shuffle(y, labels, random_state): """Return a shuffled copy of y eventually shuffle among same labels.""" if labels is None: ind = random_state.permutation(len(y)) else: ind = np.arange(len(labels)) for label in np.unique(labels): this_mask = (labels == label) ind[this_mask] = random_state.permutation(ind[this_mask]) return y[ind] def check_cv(cv, X=None, y=None, classifier=False): """Input checker utility for building a CV in a user friendly way. Parameters ---------- cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. X : array-like The data the cross-val object will be applied on. y : array-like The target variable for a supervised learning problem. classifier : boolean optional Whether the task is a classification task, in which case stratified KFold will be used. Returns ------- checked_cv: a cross-validation generator instance. The return value is guaranteed to be a cv generator instance, whatever the input type. """ is_sparse = sp.issparse(X) if cv is None: cv = 3 if isinstance(cv, numbers.Integral): if classifier: if type_of_target(y) in ['binary', 'multiclass']: cv = StratifiedKFold(y, cv) else: cv = KFold(_num_samples(y), cv) else: if not is_sparse: n_samples = len(X) else: n_samples = X.shape[0] cv = KFold(n_samples, cv) return cv def permutation_test_score(estimator, X, y, cv=None, n_permutations=100, n_jobs=1, labels=None, random_state=0, verbose=0, scoring=None): """Evaluate the significance of a cross-validated score with permutations Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. 
n_permutations : integer, optional Number of times to permute ``y``. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. labels : array-like of shape [n_samples] (optional) Labels constrain the permutation among groups of samples with a same label. random_state : RandomState or an int seed (0 by default) A random number generator instance to define the state of the random permutations generator. verbose : integer, optional The verbosity level. Returns ------- score : float The true score without permuting targets. permutation_scores : array, shape (n_permutations,) The scores obtained for each permutations. pvalue : float The returned value equals p-value if `scoring` returns bigger numbers for better scores (e.g., accuracy_score). If `scoring` is rather a loss function (i.e. when lower is better such as with `mean_squared_error`) then this is actually the complement of the p-value: 1 - p-value. Notes ----- This function implements Test 1 in: Ojala and Garriga. Permutation Tests for Studying Classifier Performance. The Journal of Machine Learning Research (2010) vol. 11 """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) random_state = check_random_state(random_state) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. score = _permutation_test_score(clone(estimator), X, y, cv, scorer) permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_permutation_test_score)( clone(estimator), X, _shuffle(y, labels, random_state), cv, scorer) for _ in range(n_permutations)) permutation_scores = np.array(permutation_scores) pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1) return score, permutation_scores, pvalue permutation_test_score.__test__ = False # to avoid a pb with nosetests def train_test_split(*arrays, **options): """Split arrays or matrices into random train and test subsets Quick utility that wraps input validation and ``next(iter(ShuffleSplit(n_samples)))`` and application to input data into a single call for splitting (and optionally subsampling) data in a oneliner. Read more in the :ref:`User Guide <cross_validation>`. Parameters ---------- *arrays : sequence of arrays or scipy.sparse matrices with same shape[0] Python lists or tuples occurring in arrays are converted to 1D numpy arrays. test_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the absolute number of test samples. If None, the value is automatically set to the complement of the train size. If train size is also None, test size is set to 0.25. train_size : float, int, or None (default is None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the absolute number of train samples. If None, the value is automatically set to the complement of the test size. random_state : int or RandomState Pseudo-random number generator state used for random sampling. stratify : array-like or None (default is None) If not None, data is split in a stratified fashion, using this as the labels array. Returns ------- splitting : list of arrays, length=2 * len(arrays) List containing train-test split of input array. 
Examples -------- >>> import numpy as np >>> from sklearn.cross_validation import train_test_split >>> X, y = np.arange(10).reshape((5, 2)), range(5) >>> X array([[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) >>> list(y) [0, 1, 2, 3, 4] >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, random_state=42) ... >>> X_train array([[4, 5], [0, 1], [6, 7]]) >>> y_train [2, 0, 3] >>> X_test array([[2, 3], [8, 9]]) >>> y_test [1, 4] """ n_arrays = len(arrays) if n_arrays == 0: raise ValueError("At least one array required as input") test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) dtype = options.pop('dtype', None) if dtype is not None: warnings.warn("dtype option is ignored and will be removed in 0.18.", DeprecationWarning) allow_nd = options.pop('allow_nd', None) allow_lists = options.pop('allow_lists', None) stratify = options.pop('stratify', None) if allow_lists is not None: warnings.warn("The allow_lists option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if options: raise TypeError("Invalid parameters passed: %s" % str(options)) if allow_nd is not None: warnings.warn("The allow_nd option is deprecated and will be " "assumed True in 0.18 and removed.", DeprecationWarning) if allow_lists is False or allow_nd is False: arrays = [check_array(x, 'csr', allow_nd=allow_nd, force_all_finite=False, ensure_2d=False) if x is not None else x for x in arrays] if test_size is None and train_size is None: test_size = 0.25 arrays = indexable(*arrays) if stratify is not None: cv = StratifiedShuffleSplit(stratify, test_size=test_size, train_size=train_size, random_state=random_state) else: n_samples = _num_samples(arrays[0]) cv = ShuffleSplit(n_samples, test_size=test_size, train_size=train_size, random_state=random_state) train, test = next(iter(cv)) return list(chain.from_iterable((safe_indexing(a, train), safe_indexing(a, test)) for a in arrays)) train_test_split.__test__ = False # to avoid a pb with nosetests
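
# --- Editor's addition (not part of this module): a minimal usage sketch for
# the iterators and helpers defined above. It assumes the module is importable
# as sklearn.cross_validation (this file's path) and that sklearn.datasets and
# sklearn.svm are available from the same package; the classifier and
# parameter choices are illustrative only.

from sklearn import datasets, svm
from sklearn.cross_validation import (KFold, StratifiedKFold, cross_val_score,
                                      train_test_split)

iris = datasets.load_iris()
X, y = iris.data, iris.target

# Old-style iterators are constructed from the number of samples (or from the
# labels), not from the data arrays as in the later model_selection API.
clf = svm.SVC(kernel='linear', C=1)
kf = KFold(len(y), n_folds=5, shuffle=True, random_state=0)
print(cross_val_score(clf, X, y, cv=kf).mean())

# Stratified variants take y directly; train_test_split can also stratify.
skf = StratifiedKFold(y, n_folds=3)
print(cross_val_score(clf, X, y, cv=skf).mean())
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0, stratify=y)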
bsd-3-clause
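A usage sketch for the permutation test defined above, assuming the pre-0.18 scikit-learn layout in which these functions live in sklearn.cross_validation; the iris/SVC pairing is illustrative and not taken from the module:

import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, permutation_test_score

iris = load_iris()
X, y = iris.data, iris.target

clf = SVC(kernel='linear')
cv = StratifiedKFold(y, n_folds=5)  # old-style CV object built from y directly

score, permutation_scores, pvalue = permutation_test_score(
    clf, X, y, cv=cv, n_permutations=100, n_jobs=1, random_state=0)

# pvalue = (number of permuted scores >= true score + 1) / (n_permutations + 1),
# exactly as computed in the function body above.
print("True accuracy: %.3f" % score)
print("Chance level:  %.3f" % np.mean(permutation_scores))
print("p-value:       %.4f" % pvalue)

Because of the +1 terms, the smallest attainable p-value with 100 permutations is (0 + 1) / (100 + 1), roughly 0.0099, so more permutations are needed to claim stronger significance.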
zhonghualiu/FaST-LMM
fastlmm/util/util.py
1
22211
import scipy as sp import numpy as np import scipy.stats as st import pdb import warnings import logging import sys import matplotlib matplotlib.use('Agg') #This lets it work even on machines without graphics displays import matplotlib.pyplot as plt def thin_results_file(myfile,dup_postfix="v2"): ''' Used in score vs lrt to remove any lines in the results ending with "v2", as these were replicate gene set entries. ''' sets = np.loadtxt(myfile,dtype=str,comments=None) nodup_ind = [] dup_ind = [] #indexes of non-duplicates, as indicated by dup_postfix for i in range(0,sets.shape[0]): tmpset=sets[i,0] if tmpset[-2:]!=dup_postfix: nodup_ind.append(i) else: dup_ind.append(i) sets_nodup = sets[nodup_ind] print "%i reps, and %i non-reps" % (len(dup_ind),len(nodup_ind)) return sets_nodup def compare_files(file1,file2,tol=1e-8,delimiter="\t"): ''' Given two files, compare the contents, including numbers up to absolute tolerance, tol Returns: val,msg where val is True/False (true means files to compare to each other) and a msg for the failure. ''' dat1=sp.loadtxt(file1,dtype='str',delimiter=delimiter,comments=None) dat2=sp.loadtxt(file2,dtype='str',delimiter=delimiter,comments=None) ncol1=dat1[0].size ncol2=dat2[0].size if ncol1!=ncol2: return False,"num columns do not match up" try: head1=dat1[0,:] head2=dat2[0,:] except: #file contains just a single column. return sp.all(dat1==dat2), "single column result doesn't match exactly ('{0}')".format(file1) #logging.warn("DO headers match up? (file='{0}', '{1}' =?= '{2}')".format(file1, head1,head2)) if not sp.all(head1==head2): return False, "headers do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, head1,head2) for c in range(ncol1): checked=False col1=dat1[1:,c] col2=dat2[1:,c] try: #if it is numeric col1=sp.array(col1,dtype='float64') col2=sp.array(col2,dtype='float64') except Exception: # if it is a string pass if not sp.all(col1==col2): return False, "string column %s does not match" % head1[c] checked=True #if it is numeric if not checked: absdiff=sp.absolute(col1-col2) if sp.any(absdiff>tol): try: return False, "numeric column %s does diff of %e not match within tolerance %e" % (head1[c],max(absdiff), tol) except: return False, "Error trying to print error message while comparing '{0}' and '{1}'".format(file1,file2) return True, "files are comparable within abs tolerance=%e" % tol def compare_mixed_files(file1,file2,tol=1e-8,delimiter="\t"): ''' Given two files, compare the contents, including numbers up to absolute tolerance, tol Returns: val,msg where val is True/False (true means files to compare to each other) and a msg for the failure. ''' dat1=sp.loadtxt(file1,dtype='str',delimiter=delimiter,comments=None) dat2=sp.loadtxt(file2,dtype='str',delimiter=delimiter,comments=None) ncol1=dat1[0].size ncol2=dat2[0].size if ncol1!=ncol2: return False,"num columns do not match up" try: r_count = dat1.shape[0] c_count = dat1.shape[1] except: #file contains just a single column. 
return sp.all(dat1==dat2), "single column result doesn't match exactly ('{0}')".format(file1) for r in xrange(r_count): for c in xrange(c_count): val1 = dat1[r,c] val2 = dat2[r,c] if val1!=val2: try: f1 = float(val1) f2 = float(val2) except: return False, "Values do not match up (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2) if abs(f1-f2) > tol: return False, "Values too different (file='{0}', '{1}' =?= '{2}')".format(file1, val1, val2) return True, "files are comparable within abs tolerance=%e" % tol #could make this more efficient by reading in blocks of SNPs, as in #FastLmmSet.py:KfromAltSnps() def write_kernel(iid,K,fileout): ''' writes out kernel assumes that iid contains a list of the ids, or else a list of [famid, personid] which then get merged with a space in between ''' nInd = K.shape[0] header = 'var' iid_merged = [] # first line contains iids for i in range(nInd): if iid.ndim==1 or iid.shape[1]==1: header += '\t%s'%(iid[i]) iid_merged.append('%s'%(iid[i])) else: header += '\t%s %s'%(iid[i,0],iid[i,1]) iid_merged.append('%s %s'%(iid[i,0],iid[i,1])) # each row of the matrix is one line f = open(fileout,'w') f.write(header+'\n') for i in range(nInd): row = ['\t%.4f'%x for x in K[i,:]] f.write('%s%s\n'%(iid_merged[i],''.join(row))) f.close() def write_plink_covariates(iid,X,fileout): ''' writes out plink-style covariates/phen file assuming that X is [N,M] for N individuals and M features assumes that iid contains a list of [famid, personid] ''' [nInd,M] = X.shape # each row of the matrix is one line f = open(fileout,'w') for i in range(nInd): row = ['\t%.4f'%x for x in X[i,:]] f.write('%s\t%s%s\n'%(iid[i,0],iid[i,1],''.join(row))) f.close() def combineseeds(seed1,seed2): import hashlib import sys seed=int(hashlib.md5(str(seed1) + "_" + str(seed2)).hexdigest()[-8:], 16) #as of numpy 1.9, seeds must be 32-bit, so keep only the 8 right-most hex digits return seed def standardize_col(dat,meanonly=False): ''' Mean impute each columns of an array. ''' colmean=st.nanmean(dat) if ~meanonly: colstd=st.nanstd(dat) else: colstd=None ncol=dat.shape[1] nmissing=sp.zeros((ncol)) datimp=sp.empty_like(dat); datimp[:]=dat for c in sp.arange(0,ncol): datimp[sp.isnan(datimp[:,c]),c]=colmean[c] datimp[:,c]=datimp[:,c]-colmean[c] if not meanonly: if colstd[c]>1e-6: datimp[:,c]=datimp[:,c]/colstd[c] else: print "warning: colstd=" + colstd[c] + " during normalization" nmissing[c]=float(sp.isnan(dat[:,c]).sum()) fracmissing=nmissing/dat.shape[0] return datimp,fracmissing def extractcols(filein,colnameset=None,dtypeset=None): if colnameset is None: raise Exception("must specify column names to read") import pandas as pd data=pd.read_csv(filein,delimiter = '\t',dtype=dtypeset,usecols=colnameset) r={} for j in sp.arange(0,len(colnameset)): name=colnameset.pop() r[name]=(data[name].values) return r def argintersect_left(a, b): """ find indices in a, whose corresponding values are in b ---------------------------------------------------------------------- Input: a : array, for which indices are returned that are in the intersect with b b : array to be intersected with a ---------------------------------------------------------------------- Output: the indices of elements of a, which are in intersect of a and b ---------------------------------------------------------------------- """ return sp.arange(a.shape[0])[sp.in1d(a,b)] def intersect_ids(idslist,sep="Q_Q"): ''' Takes a list of 2d string arrays of family and individual ids. These are intersected. 
"sep" is used to concatenate the family and individual ids into one unique string Returns: indarr, an array of size N x L, where N is the number of individuals in the intersection, and L is the number of lists in idslist, and which contains the index to use (in order) such that all people will be identical and in order across all data sets. If one of the lists=None, it is ignored (but still has values reported in indarr, all equal to -1), but the first list must not be None. ''' #!!warnings.warn("This intersect_ids is deprecated. Pysnptools includes newer versions of intersect_ids", DeprecationWarning) id2ind={} L=len(idslist) observed=sp.zeros(L,dtype='bool') for l, id_list in enumerate(idslist): if id_list is not None: observed[l]=1 if l==0: if ~observed[l]: raise Exception("first list must be non-empty") else: for i in xrange(id_list.shape[0]): id=id_list[i,0] +sep+ id_list[i,1] entry=sp.zeros(L)*sp.nan #id_list to contain the index for this id, for all lists provided entry[l]=i #index for the first one id2ind[id]=entry elif observed[l]: for i in xrange(id_list.shape[0]): id=id_list[i,0] +sep+ id_list[i,1] if id2ind.has_key(id): id2ind[id][l]=i indarr=sp.array(id2ind.values(),dtype='float') #need float because may contain NaNs indarr[:,~observed]=-1 #replace all Nan's from empty lists to -1 inan = sp.isnan(indarr).any(1) #find any rows that contain at least one Nan indarr=indarr[~inan] #keep only rows that are not NaN indarr=sp.array(indarr,dtype='int') #convert to int so can slice return indarr def indof_constfeatures(X,axis=0): ''' Assumes features are columns (by default, but can do rows), and checks to see if all features are simply constants, such that it is equivalent to a bias and nothing else ''' featvar=sp.var(X,axis=axis) badind = sp.nonzero(featvar==0)[0] return badind def constfeatures(X,axis=0): ''' Assumes features are columns (by default, but can do rows), and checks to see if all features are simply constants, such that it is equivalent to a bias and nothing else ''' featmeans=sp.mean(X,axis=axis) return (X-featmeans==0).all() def appendtofilename(filename,midfix,sep="."): import os dir, fileext = os.path.split(filename) file, extension = os.path.splitext(fileext) infofilename = dir + os.path.sep + file + sep + midfix + extension return infofilename def datestamp(appendrandom=False): import datetime now = datetime.datetime.now() s = str(now)[:19].replace(" ","_").replace(":","_") if appendrandom: import random s += "_" + str(random.random())[2:] return s #not needed, just use the sp RandomState.permutation #def permute(numbersamples): # perm = sp.random.permutation(numbersamples) # return perm #Not needed because enumerate is built in to the language #def appendindex(iter): # index = -1; # for item in iter: # index += 1 # yield item, index def create_directory_if_necessary(name, isfile=True): import os if isfile: directory_name = os.path.dirname(name) else: directory_name = name if directory_name != "": try: os.makedirs(directory_name) except OSError, e: if not os.path.isdir(directory_name): raise Exception("not valid path: '{0}'. 
(Working directory is '{1}'".format(directory_name,os.getcwd())) def which(vec): ''' find the True from the index 0 with bool vector vec ---------------------------------------------------------------------- Input: vec : vector of bool ---------------------------------------------------------------------- Output: index of the first True from the bool vector vec ---------------------------------------------------------------------- ''' for i, item in enumerate(vec): if (item): return(i) return(-1) def which_opposite(vec): ''' find the True from the index 0 with bool vector vec ---------------------------------------------------------------------- Input: vec : vector of bool ---------------------------------------------------------------------- Output: index of the last True from the bool vector vec ---------------------------------------------------------------------- ''' for i in reversed(xrange(len(vec))): item = vec[i] if (item): return(i) return(-1) def generatePermutation(numbersamples,randomSeedOrState): from numpy.random import RandomState if isinstance(randomSeedOrState,RandomState): randomstate = randomSeedOrState else: randomstate = RandomState(int(randomSeedOrState % sys.maxint)) perm = randomstate.permutation(numbersamples) return perm def excludeinds(pos0, pos1, mindist = 10.0,idist = 2): ''' get the indices of SNPs that have to be excluded from the set of null SNPs when testing alternative SNPs to correct for proximal contamination. -------------------------------------------------------------------------- Input: pos0 : [S0*3] array of null-model SNP positions pos1 : [S0*3] array of alternative-model SNP positions idist : index in pos array that the exclusion is based on. (1=genetic distance, 2=basepair distance) -------------------------------------------------------------------------- Output: i_exclude : [S] 1-D boolean array indicating excluson of SNPs (True: exclude, False: do not exclude) -------------------------------------------------------------------------- ''' chromosomes1 = sp.unique(pos1[:,0]) i_exclude = sp.zeros(pos0[:,0].shape[0],dtype = 'bool') if (mindist>=0.0): for ichr in xrange(chromosomes1.shape[0]): i_SNPs1_chr=pos1[:,0] == chromosomes1[ichr] i_SNPs0_chr=pos0[:,0] == chromosomes1[ichr] pos1_ = pos1[i_SNPs1_chr,idist] pos0_ = pos0[i_SNPs0_chr,idist] distmatrix = pos1_[sp.newaxis,:] - pos0_[:,sp.newaxis] i_exclude[i_SNPs0_chr] = (sp.absolute(distmatrix)<=mindist).any(1) return i_exclude def dotDotRange(dotDotString): ''' A method for generating integers. For example: > for i in util.dotDotRange("1..4,100,-1..1"): print i 1 2 3 4 100 -1 0 1 ''' for intervalString in dotDotString.split(","): parts = intervalString.split("..") if len(parts) > 2 : raise Exception("Expect at most one '..' between commas. 
(see {0})".format(intervalString)) start = int(parts[0]) if len(parts) == 1: yield start else: lastInclusive = int(parts[1]) for i in xrange(start,lastInclusive+1): yield i def _run_length_encode(seq): count = 0 previous = None for item in seq: if count == 0: count = 1 previous = item elif item == previous: count += 1 else: yield previous, count previous = item count =1 if count > 0: yield previous, count def _rel_to_midpoint(rle): previous_count = 0 for item, count in rle: yield previous_count + count // 2 previous_count += count def _color_list(chr_list,rle): chr_to_index = dict((chr,index) for index,(chr,count) in enumerate(rle)) index_to_color = {0:"b",1:"g"} result = [index_to_color[chr_to_index[chr]%len(index_to_color)] for chr in chr_list] return result def manhattan_plot(chr_pos_pvalue_array,pvalue_line=None,plot_threshold=1.0,vline_significant=False,marker="o", chromosome_starts=None, xaxis_unit_bp=True, alpha=0.5): """ Function to create a Manhattan plot. See http://en.wikipedia.org/wiki/Manhattan_plot. Args: chr_pos_pvalue_array: an n x 3 numpy array. The three columns are the chrom number (as a number), the position, and pvalue. :type chr_pos_pvalue_array: numpy array pvalue_line: (Default: None). If given, draws a line at that PValue. :type pvalue_line: a 'pheno dictionary' or a string plot_threshold: plot only SNPs that achieve a P-value smaller than pvalue_threshold to speed up plotting vline_significant: boolean. Draw a vertical line at each significant Pvalue? :rtype: none, but changes the global current figure. marker: marker for the scatter plot. default: "o" chromosome_starts: [Nchrom x 3] ndarray: chromosome, cumulative start position, cumulative stop position cumulative chromosome starts, for plotting. If None (default), this is estimated from data xaxis_unit_bp: plot cumulative position in basepair units on x axis? If False, only use rank of SNP positions. (default: True) alpha: alpha (opaquness) for P-value markers in scatterplot (default 0.5) Returns: chromosome_starts [Nchrom x 3] ndarray: chromosome, cumulative start position, cumulative stop position cumulative chromosome starts used in plotting. 
:Example: >>> from fastlmm.association import single_snp_leave_out_one_chrom >>> from pysnptools.snpreader import Bed >>> import matplotlib.pyplot as plt >>> import fastlmm.util.util as flutil >>> pheno_fn = "../feature_selection/examples/toydata.phe" >>> results_dataframe = single_snp_leave_out_one_chrom(test_snps="../feature_selection/examples/toydata.5chrom", pheno=pheno_fn, h2=.2) >>> chromosome_starts = flutil.manhattan_plot(results_dataframe.as_matrix(["Chr", "ChrPos", "PValue"]),pvalue_line=1e-7) >>> #plt.show() """ # create a copy of the data and sort it by chrom and then position array = np.array(chr_pos_pvalue_array) if plot_threshold: array = array[array[:,2]<=plot_threshold] else: plot_threshold = 1.0 array=array[np.argsort(array[:,1]),:] #sort by ChrPos array=array[np.argsort(array[:,0],kind='mergesort'),:] #Finally, sort by Chr (but keep ChrPos in case of ties) rle = list(_run_length_encode(array[:,0])) if xaxis_unit_bp: #compute and use cumulative basepair positions for x-axis if chromosome_starts is None: chromosome_starts = _compute_x_positions_chrom(array) chr_pos_list = _compute_x_positions_snps(array, chromosome_starts) plt.xlim([0,chromosome_starts[-1,2]+1]) plt.xticks(chromosome_starts[:,1:3].mean(1),chromosome_starts[:,0]) else: #use rank indices for x-axis chr_pos_list = np.arange(array.shape[0]) xTickMarks = [str(int(item)) for item,count in rle] plt.xlim([0,array.shape[0]]) plt.xticks(list(_rel_to_midpoint(rle)), xTickMarks) y = -np.log10(array[:,2]) max_y = y.max() if pvalue_line and vline_significant: #mark significant associations (ones that pass the pvalue_line) by a red vertical line: idx_significant = array[:,2]<pvalue_line if np.any(idx_significant): y_significant = y[idx_significant] chr_pos_list_significant = chr_pos_list[idx_significant] for i in xrange(len(chr_pos_list_significant)): plt.axvline(x=chr_pos_list_significant[i],ymin = 0.0, ymax = y_significant[i], color = 'r',alpha=0.8) plt.scatter(chr_pos_list,y,marker=marker,c=_color_list(array[:,0],rle),edgecolor='none',s=y/max_y*20+0.5, alpha=alpha) plt.xlabel("chromosome") plt.ylabel("-log10(P value)") if pvalue_line: plt.axhline(-np.log10(pvalue_line),linestyle="--",color='gray') plt.ylim([-np.log10(plot_threshold),None]) return chromosome_starts def _compute_x_positions_chrom(positions, offset=1e5): chromosomes = np.unique(positions[:,0]) chromosomes.sort() chromosome_starts = np.zeros((chromosomes.shape[0],3),dtype="object") chr_start_next = 0 for i, chromosome in enumerate(chromosomes): pos_chr = positions[positions[:,0]==chromosome] chromosome_starts[i,0] = chromosome #the chromosome chromosome_starts[i,1] = chr_start_next #start of the chromosome chromosome_starts[i,2] = chr_start_next + pos_chr.max() #end of the chromosome chr_start_next = chromosome_starts[i,2] + offset return chromosome_starts def _compute_x_positions_snps(positions, chromosome_starts): cumulative_pos = np.zeros(positions.shape[0]) for i, chromosome_start in enumerate(chromosome_starts): idx_chr = positions[:,0]==chromosome_start[0] cumulative_pos[idx_chr] = positions[idx_chr][:,1] + chromosome_start[1] return cumulative_pos if __name__ == "__main__": logging.basicConfig(level=logging.INFO) import doctest doctest.testmod()
apache-2.0
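standardize_col above mean-imputes each column and then, unless meanonly is set, scales by the column standard deviation. Since the module targets Python 2 and the long-deprecated scipy.stats.nanmean/nanstd, here is a minimal Python 3 sketch of the same idea using numpy's nan-aware reductions; the function and variable names are mine, and it follows the apparent intent of the meanonly flag rather than the literal ~meanonly test:

import numpy as np

def standardize_columns(dat, meanonly=False):
    """Mean-impute NaNs per column, center, and optionally scale to unit std."""
    dat = np.asarray(dat, dtype=float)
    colmean = np.nanmean(dat, axis=0)
    colstd = np.nanstd(dat, axis=0)
    frac_missing = np.isnan(dat).mean(axis=0)

    out = np.where(np.isnan(dat), colmean, dat) - colmean
    if not meanonly:
        ok = colstd > 1e-6              # skip (near-)constant columns
        out[:, ok] = out[:, ok] / colstd[ok]
    return out, frac_missing

X = np.array([[1.0, 2.0],
              [np.nan, 4.0],
              [3.0, 6.0]])
Xs, frac = standardize_columns(X)
print(Xs)     # imputed, centered, unit-std columns
print(frac)   # fraction of missing entries per column -> [0.3333, 0.0]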
suyashbire1/pyhton_scripts_mom6
plot_twamomx_budget_complete_direct_newest.py
2
27301
import importlib import sys import time import matplotlib.pyplot as plt import mom_plot1 import numpy as np import pyximport import readParams_moreoptions as rdp1 from getvaratzc import getvaratzc, getvaratzc5 from netCDF4 import Dataset as dset from netCDF4 import MFDataset as mfdset from pym6 import Domain, Plotter, Variable importlib.reload(mom_plot1) m6plot = mom_plot1.m6plot xdegtokm = mom_plot1.xdegtokm pyximport.install() importlib.reload(Domain) importlib.reload(Variable) importlib.reload(Plotter) gv = Variable.GridVariable def extract_twamomx_terms_pym6(initializer): domain = Domain.Domain(initializer) plot_loc = 'ul' with mfdset(initializer.fil) as fh, mfdset(initializer.fil2) as fh2: h = (gv('h_Cu', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) ur = gv('uh', domain, plot_loc, fh2, fh, plot_loc=plot_loc, divisor='h_Cu', name=r'$\hat{u}$').read_array( divide_by_dy=True, filled=0) urx = (gv('uh', domain, plot_loc, fh2, fh, plot_loc=plot_loc, divisor='h_Cu').xsm().xep().read_array( extend_kwargs={'method': 'symmetric'}, divide_by_dy=True, filled=0).ddx(3).move_to(plot_loc)) ury = (gv('uh', domain, plot_loc, fh2, fh, plot_loc=plot_loc, divisor='h_Cu').ysm().yep().read_array( extend_kwargs={'method': 'vorticity'}, divide_by_dy=True, filled=0).ddx(2).move_to(plot_loc)) humx = (gv('uh', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .xsm().xep().read_array( extend_kwargs={'method': 'symmetric'}, filled=0).ddx(3, div_by_area=True).move_to(plot_loc)) hvm = (gv('vh', domain, 'vl', fh2, fh, plot_loc=plot_loc).ysm().xep() .read_array( divide_by_dx=True, filled=0, extend_kwargs={'method': 'vorticity'}) .move_to('hl').move_to(plot_loc)) hvmy = (gv('vh', domain, 'vl', fh2, fh, plot_loc=plot_loc).ysm().xep() .read_array( filled=0, extend_kwargs={'method': 'vorticity'}) .ddx(2, div_by_area=True).move_to(plot_loc)) huuxphuvym = ( (gv('twa_huuxpt', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) + (gv('twa_huvymt', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0))) huuxm = (gv('huu_Cu', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .xsm().xep().read_array( extend_kwargs={'method': 'symmetric'}, filled=0).ddx(3, div_by_area=True).move_to(plot_loc)) huvym = huuxphuvym + huuxm urb = (gv('uh', domain, plot_loc, fh2, fh, plot_loc=plot_loc, divisor='h_Cu').lsm().lep().read_array( extend_kwargs={'method': 'mirror'}, divide_by_dy=True, filled=0).ddx(1).move_to('ul')) hwb = (gv('wd', domain, 'hi', fh2, fh, plot_loc=plot_loc) .xep().read_array( filled=0, extend_kwargs={'method': 'vorticity'}) .o1diff(1).move_to(plot_loc)) hwm = (gv('wd', domain, 'hi', fh2, fh, plot_loc=plot_loc) .xep().read_array( filled=0, extend_kwargs={'method': 'vorticity'}) .move_to('hl').move_to(plot_loc)) * domain.db esq = (gv('esq', domain, 'hl', fh2, fh, plot_loc=plot_loc) .xep().read_array(extend_kwargs={'method': 'mirror'})) e = (gv('e', domain, 'hi', fh2, fh, plot_loc=plot_loc) .xep().read_array(extend_kwargs={'method': 'mirror'}) .move_to('hl')) edlsqm = esq - e * e edlsqmx = edlsqm.ddx(3) hpfu = (gv('twa_hpfu', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) pfum = (gv('PFu', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) edpfudmb = -hpfu + h * pfum - edlsqmx * domain.db * 0.5 hfvm = (gv('twa_hfv', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) huwbm = (gv('twa_huwb', domain, plot_loc, fh2, fh, plot_loc=plot_loc) .read_array(filled=0)) hdiffum = ( gv('twa_hdiffu', domain, plot_loc, fh2, fh, 
plot_loc=plot_loc) .read_array(filled=0)) hdudtviscm = (gv('twa_hdudtvisc', domain, plot_loc, fh2, fh, plot_loc=plot_loc).read_array(filled=0)) advx = ur * urx advy = hvm * ury / h advb = hwm * urb / h cor = hfvm / h pfum = pfum xdivep1 = -huuxm / h xdivep2 = advx xdivep3 = ur * humx / h xdivep4 = -edlsqmx / 2 * domain.db / h xdivep = (xdivep1 + xdivep2 + xdivep3 + xdivep4) ydivep1 = huvym / h ydivep2 = advy ydivep3 = ur * hvmy / h ydivep = (ydivep1 + ydivep2 + ydivep3) bdivep1 = huwbm / h bdivep2 = advb bdivep3 = ur * hwb / h bdivep4 = -edpfudmb / h bdivep = (bdivep1 + bdivep2 + bdivep3 + bdivep4) X1twa = hdiffum / h X2twa = hdudtviscm / h budgetlist = [ -advx, -advy, -advb, cor, pfum, xdivep1 + xdivep2 + xdivep3, xdivep4, ydivep, bdivep1 + bdivep2 + bdivep3, bdivep4, X1twa, X2twa ] ti = [ '(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)', '(j)', '(k)', '(l)' ] lab = [ r'$-\hat{u}\hat{u}_{\tilde{x}}$', r'$-\hat{v}\hat{u}_{\tilde{y}}$', r'$-\hat{\varpi}\hat{u}_{\tilde{b}}$', r'$f\hat{v}$', r'$-\overline{m_{\tilde{x}}}$', r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} u ^{\prime \prime} })_{\tilde{x}}$""", r"""-$\frac{1}{2\overline{\zeta_{\tilde{b}}}}(\overline{\zeta ^{\prime 2}})_{\tilde{x}}$""", r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} v ^{\prime \prime}})_{\tilde{y}}$""", r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} \varpi ^{\prime \prime}})_{\tilde{b}}$""", r""" -$\frac{1}{2\overline{\zeta_{\tilde{b}}}}(\overline{\zeta ^\prime m_{\tilde{x}}^\prime})_{\tilde{b}}$""", r'$\widehat{X^H}$', r'$\widehat{X^V}$' ] for i, var in enumerate(budgetlist): var.name = lab[i] return budgetlist def plot_twamomx_pym6(initializer, **kwargs): budgetlist = extract_twamomx_terms_pym6(initializer) terms = kwargs.get('terms', None) if terms: budgetlist = [budgetlist[i] for i in terms] with mfdset(initializer.fil) as fh, mfdset(initializer.fil2) as fh2: e = (gv('e', budgetlist[0].dom, 'hi', fh2, fh, plot_loc='ul') .xep().read_array(extend_kwargs={'method': 'mirror'}) .move_to('ui')) plot_kwargs = dict(cmap='RdBu_r') plotter_kwargs = dict( xtokm=True, zcoord=True, z=initializer.z, e=e, isop_mean=True) ax, fig = Plotter.budget_plot( budgetlist, initializer.meanax, plot_kwargs=plot_kwargs, plotter_kwargs=plotter_kwargs, individual_cbars=False, **kwargs) swash = kwargs.get('swash', True) plotter_kwargs.pop('ax') if swash: initializer_for_swash = Domain.Initializer( geofil=initializer.geofil, vgeofil=initializer.vgeofil, wlon=-0.5, elon=0, slat=10, nlat=11) domain_for_swash0 = Domain.Domain(initializer_for_swash) with mfdset(initializer.fil3) as fh: islayerdeep = ( gv('islayerdeep', budgetlist[0].dom, 'ql', fh, plot_loc='ul') .ysm().read_array( tmean=False, extend_kwargs={'method': 'mirror'}).move_to('ul')) islayerdeep0 = ( gv('islayerdeep', domain_for_swash0, 'ql', fh, plot_loc='ul') .ysm().read_array( tmean=False, extend_kwargs={'method': 'mirror'}).move_to('ul')) swashperc = ( -islayerdeep / islayerdeep0.values[-1, 0, 0, 0] + 1) * 100 plot_kwargs = kwargs.get('swash_plot_kwargs', dict( colors='grey', linewidths=4)) for axc in ax.ravel(): axc, _ = swashperc.plot( 'nanmean', initializer.meanax, only_contour=True, clevs=np.array([1]), annotate='None', ax=axc, plot_kwargs=plot_kwargs, **plotter_kwargs) return ax, fig def extract_twamomx_terms(geofil, vgeofil, fil, fil2, xstart, xend, ystart, yend, zs, ze, meanax, fil3=None, alreadysaved=False, xyasindices=False, calledfrompv=False, htol=1e-3): if not alreadysaved: keepax = () for i 
in range(4): if i not in meanax: keepax += (i, ) fhvgeo = dset(vgeofil) db = -fhvgeo.variables['g'][:] dbi = np.append(db, 0) fhvgeo.close() fhgeo = dset(geofil) fh = mfdset(fil) fh2 = mfdset(fil2) zi = rdp1.getdims(fh)[2][0] dbl = np.diff(zi) * 9.8 / 1031 if xyasindices: (xs, xe), (ys, ye) = (xstart, xend), (ystart, yend) _, _, dimu = rdp1.getdimsbyindx( fh, xs, xe, ys, ye, zs=zs, ze=ze, ts=0, te=None, xhxq='xq', yhyq='yh', zlzi='zl') else: (xs, xe), (ys, ye), dimu = rdp1.getlatlonindx( fh, wlon=xstart, elon=xend, slat=ystart, nlat=yend, zs=zs, ze=ze, xhxq='xq') sl = np.s_[:, :, ys:ye, xs:xe] slmy = np.s_[:, :, ys - 1:ye, xs:xe] D, (ah, aq) = rdp1.getgeombyindx(fhgeo, xs, xe, ys, ye)[0:2] Dforgetutwaforxdiff = rdp1.getgeombyindx(fhgeo, xs - 1, xe, ys, ye)[0] Dforgetutwaforydiff = rdp1.getgeombyindx(fhgeo, xs, xe, ys - 1, ye + 1)[0] Dforgethvforydiff = rdp1.getgeombyindx(fhgeo, xs, xe, ys - 1, ye)[0] dxt, dyt = rdp1.getgeombyindx(fhgeo, xs, xe, ys, ye)[2][6:8] dxcu, dycu = rdp1.getgeombyindx(fhgeo, xs, xe, ys, ye)[2][0:2] dycuforxdiff = rdp1.getgeombyindx(fhgeo, xs - 1, xe, ys, ye)[2][1:2] dycuforydiff = rdp1.getgeombyindx(fhgeo, xs, xe, ys - 1, ye + 1)[2][1:2] dxbu, dybu = rdp1.getgeombyindx(fhgeo, xs, xe, ys, ye + 1)[2][4:6] aq1 = rdp1.getgeombyindx(fhgeo, xs, xe, ys - 1, ye)[1][1] ah1 = rdp1.getgeombyindx(fhgeo, xs - 1, xe, ys, ye)[1][0] dxcu1 = rdp1.getgeombyindx(fhgeo, xs, xe, ys - 1, ye + 1)[2][0] nt_const = dimu[0].size t0 = time.time() dt = fh.variables['average_DT'][:] dt = dt[:, np.newaxis, np.newaxis, np.newaxis] if fil3: fh3 = mfdset(fil3) slmytn = np.s_[-1:, :, ys - 1:ye, xs:xe] # islayerdeep0 = fh3.variables['islayerdeep'][:,0,0,0].sum() # islayerdeep = (fh3.variables['islayerdeep'][slmy].filled(np.nan)).sum(axis=0, # keepdims=True) islayerdeep0 = fh3.variables['islayerdeep'][-1:, 0, 0, 0] islayerdeep = (fh3.variables['islayerdeep'][slmytn].filled(np.nan)) swash = (islayerdeep0 - islayerdeep) / islayerdeep0 * 100 swash = 0.5 * (swash[:, :, :-1, :] + swash[:, :, 1:, :]) fh3.close() else: swash = None em = (fh2.variables['e'][0:, zs:ze, ys:ye, xs:xe] * dt).sum( axis=0, keepdims=True) / np.sum(dt) elm = 0.5 * (em[:, 0:-1, :, :] + em[:, 1:, :, :]) uh = (fh.variables['uh_masked'][0:, zs:ze, ys:ye, xs:xe].filled(np.nan) * dt).sum(axis=0, keepdims=True) / np.sum(dt) h_cu = (fh.variables['h_Cu'][0:, zs:ze, ys:ye, xs:xe].filled(0) * dt).sum(axis=0, keepdims=True) / np.sum(dt) h_cu[h_cu < htol] = np.nan h_um = h_cu utwa = uh / h_cu / dycu uhforxdiff = (fh.variables['uh_masked'][0:, zs:ze, ys:ye, xs - 1:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) h_cuforxdiff = (fh.variables['h_Cu'][0:, zs:ze, ys:ye, xs - 1:xe] * dt).filled(0).sum(axis=0, keepdims=True) / np.sum(dt) h_cuforxdiff[h_cuforxdiff < htol] = np.nan utwaforxdiff = uhforxdiff / h_cuforxdiff #/dycuforxdiff uhforydiff = ( fh.variables['uh_masked'][0:, zs:ze, ys - 1:ye + 1, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) h_cuforydiff = (fh.variables['h_Cu'][0:, zs:ze, ys - 1:ye + 1, xs:xe] * dt).filled(0).sum(axis=0, keepdims=True) / np.sum(dt) h_cuforydiff[h_cuforydiff < htol] = np.nan utwaforydiff = uhforydiff / h_cuforydiff #/dycuforydiff utwax = np.diff(np.nan_to_num(utwaforxdiff), axis=3) / dxt / dyt utwax = np.concatenate((utwax, -utwax[:, :, :, [-1]]), axis=3) utwax = 0.5 * (utwax[:, :, :, 0:-1] + utwax[:, :, :, 1:]) utway = np.diff(utwaforydiff, axis=2) / dxbu / dybu utway = 0.5 * (utway[:, :, 0:-1, :] + utway[:, :, 1:, :]) humx = np.diff(np.nan_to_num(uhforxdiff), axis=3) / dxt / 
dyt humx = np.concatenate((humx, -humx[:, :, :, [-1]]), axis=3) humx = 0.5 * (humx[:, :, :, 0:-1] + humx[:, :, :, 1:]) hvm = (fh.variables['vh_masked'][0:, zs:ze, ys - 1:ye, xs:xe] * dt).sum(axis=0, keepdims=True) / np.sum(dt) hvm = np.concatenate((hvm, -hvm[:, :, :, -1:]), axis=3) hvm = 0.25 * (hvm[:, :, :-1, :-1] + hvm[:, :, :-1, 1:] + hvm[:, :, 1:, :-1] + hvm[:, :, 1:, 1:]) / dxcu hv = (fh.variables['vh_masked'][0:, zs:ze, ys - 1:ye, xs:xe] * dt).sum( axis=0, keepdims=True) / np.sum(dt) hvmy = np.diff(hv, axis=2) / dxt / dyt hvmy = np.concatenate((hvmy, -hvmy[:, :, :, -1:]), axis=3) hvmy = 0.5 * (hvmy[:, :, :, :-1] + hvmy[:, :, :, 1:]) huuxphuvym = ( fh.variables['twa_huuxpt'][0:, zs:ze, ys:ye, xs:xe] * dt + fh.variables['twa_huvymt'][0:, zs:ze, ys:ye, xs:xe] * dt ).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) #u = (fh.variables['u_masked'][0:,zs:ze,ys:ye,xs-1:xe]*dt).filled(np.nan).sum(axis=0,keepdims=True)/np.sum(dt) huu = (fh.variables['huu_Cu'][0:, zs:ze, ys:ye, xs - 1:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) huuxm = np.diff(np.nan_to_num(huu), axis=3) / dxt / dyt huuxm = np.concatenate((huuxm, -huuxm[:, :, :, -1:]), axis=3) huuxm = 0.5 * (huuxm[:, :, :, :-1] + huuxm[:, :, :, 1:]) # huu = (fh.variables['huu_T'][0:,zs:ze,ys:ye,xs:xe]*dt).sum(axis=0,keepdims=True)/np.sum(dt)*dyt # huu = np.concatenate((huu,-huu[:,:,:,-1:]),axis=3) # huuxm = np.diff(huu,axis=3)/dxcu/dycu huvym = huuxphuvym + huuxm utwaforvdiff = np.concatenate((utwa[:, [0], :, :], utwa), axis=1) utwab = np.diff(utwaforvdiff, axis=1) / db[:, np.newaxis, np.newaxis] utwab = np.concatenate( (utwab, np.zeros(utwab[:, :1, :, :].shape)), axis=1) utwab = 0.5 * (utwab[:, 0:-1, :, :] + utwab[:, 1:, :, :]) hwb = (fh2.variables['wd'][0:, zs:ze, ys:ye, xs:xe] * dt).sum( axis=0, keepdims=True) / np.sum(dt) hwb = np.diff(hwb, axis=1) hwb = np.concatenate((hwb, -hwb[:, :, :, -1:]), axis=3) hwb_u = 0.5 * (hwb[:, :, :, :-1] + hwb[:, :, :, 1:]) hwb = (fh2.variables['wd'][0:, zs:ze, ys:ye, xs:xe] * dt).sum( axis=0, keepdims=True) / np.sum(dt) hwm = 0.5 * (hwb[:, :-1] + hwb[:, 1:]) * dbl[:, np.newaxis, np.newaxis] hwm = np.concatenate((hwm, -hwm[:, :, :, -1:]), axis=3) hwm_u = 0.5 * (hwm[:, :, :, :-1] + hwm[:, :, :, 1:]) esq = (fh.variables['esq'][0:, zs:ze, ys:ye, xs:xe] * dt).sum( axis=0, keepdims=True) / np.sum(dt) edlsqm = (esq - elm**2) edlsqm = np.concatenate((edlsqm, edlsqm[:, :, :, -1:]), axis=3) edlsqmx = np.diff(edlsqm, axis=3) / dxcu hpfu = (fh.variables['twa_hpfu'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) pfum = (fh2.variables['PFu'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) edpfudmb = -hpfu + h_cu * pfum - 0.5 * edlsqmx * dbl[:, np.newaxis, np. 
newaxis] hfvm = (fh.variables['twa_hfv'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) huwbm = (fh.variables['twa_huwb'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) hdiffum = (fh.variables['twa_hdiffu'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) hdudtviscm = (fh.variables['twa_hdudtvisc'][0:, zs:ze, ys:ye, xs:xe] * dt).filled(np.nan).sum(axis=0, keepdims=True) / np.sum(dt) fh2.close() fh.close() advx = utwa * utwax advy = hvm * utway / h_um advb = hwm_u * utwab / h_um cor = hfvm / h_um pfum = pfum xdivep1 = -huuxm / h_um xdivep2 = advx xdivep3 = utwa * humx / h_um xdivep4 = -0.5 * edlsqmx * dbl[:, np.newaxis, np.newaxis] / h_um xdivep = (xdivep1 + xdivep2 + xdivep3 + xdivep4) ydivep1 = huvym / h_um ydivep2 = advy ydivep3 = utwa * hvmy / h_um ydivep = (ydivep1 + ydivep2 + ydivep3) bdivep1 = huwbm / h_um bdivep2 = advb bdivep3 = utwa * hwb_u / h_um bdivep4 = -edpfudmb / h_um bdivep = (bdivep1 + bdivep2 + bdivep3 + bdivep4) X1twa = hdiffum / h_um X2twa = hdudtviscm / h_um terms = np.concatenate( (-advx[:, :, :, :, np.newaxis], -advy[:, :, :, :, np.newaxis], -advb[:, :, :, :, np.newaxis], cor[:, :, :, :, np.newaxis], pfum[:, :, :, :, np.newaxis], xdivep[:, :, :, :, np.newaxis], ydivep[:, :, :, :, np.newaxis], bdivep[:, :, :, :, np.newaxis], X1twa[:, :, :, :, np.newaxis], X2twa[:, :, :, :, np.newaxis]), axis=4) termsep = np.concatenate( (xdivep1[:, :, :, :, np.newaxis], xdivep3[:, :, :, :, np.newaxis], xdivep4[:, :, :, :, np.newaxis], ydivep1[:, :, :, :, np.newaxis], ydivep3[:, :, :, :, np.newaxis], bdivep1[:, :, :, :, np.newaxis], bdivep3[:, :, :, :, np.newaxis], bdivep4[:, :, :, :, np.newaxis]), axis=4) termsm = np.nanmean(terms, axis=meanax, keepdims=True) termsepm = np.nanmean(termsep, axis=meanax, keepdims=True) X = dimu[keepax[1]] Y = dimu[keepax[0]] if 1 in keepax and not calledfrompv: em = np.nanmean(em, axis=meanax, keepdims=True) elm = np.nanmean(elm, axis=meanax, keepdims=True) z = np.linspace(-3000, 0, 100) Y = z P = getvaratzc5( termsm.astype(np.float32), z.astype(np.float32), em.astype(np.float32)) Pep = getvaratzc5( termsepm.astype(np.float32), z.astype(np.float32), em.astype(np.float32)) if fil3: swash = np.nanmean(swash, meanax, keepdims=True) swash = getvaratzc( swash.astype(np.float32), z.astype(np.float32), em.astype(np.float32)).squeeze() else: P = termsm.squeeze() Pep = termsepm.squeeze() if not calledfrompv: np.savez('twamomx_complete_terms', X=X, Y=Y, P=P, Pep=Pep) else: npzfile = np.load('twamomx_complete_terms.npz') X = npzfile['X'] Y = npzfile['Y'] P = npzfile['P'] Pep = npzfile['Pep'] return (X, Y, P, Pep, swash, em.squeeze()) def plot_twamomx(geofil, vgeofil, fil, fil2, xstart, xend, ystart, yend, zs, ze, meanax, fil3=None, cmaxpercfactor=1, cmaxpercfactorforep=1, plotterms=[3, 4, 7], swashperc=1, savfil=None, savfilep=None, alreadysaved=False): X, Y, P, Pep, swash, em = extract_twamomx_terms( geofil, vgeofil, fil, fil2, xstart, xend, ystart, yend, zs, ze, meanax, alreadysaved=alreadysaved, fil3=fil3) P = np.ma.masked_array(P, mask=np.isnan(P)).squeeze() cmax = np.nanpercentile(P, [cmaxpercfactor, 100 - cmaxpercfactor]) cmax = np.max(np.fabs(cmax)) fig, ax = plt.subplots( np.int8(np.ceil(len(plotterms) / 2)), 2, sharex=True, sharey=True, figsize=(10, 3)) ti = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)', '(i)', '(j)'] lab = [ r'$-\hat{u}\hat{u}_{\tilde{x}}$', r'$-\hat{v}\hat{u}_{\tilde{y}}$', r'$-\hat{\varpi}\hat{u}_{\tilde{b}}$', 
r'$f\hat{v}$', r'$-\overline{m_{\tilde{x}}}$', r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} u ^{\prime \prime} } +\frac{1}{2}\overline{\zeta ^{\prime 2}})_{\tilde{x}}$""", r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} v ^{\prime \prime}})_{\tilde{y}}$""", r"""-$\frac{1}{\overline{h}}(\overline{h}\widehat{u ^{\prime \prime} \varpi ^{\prime \prime}} + \overline{\zeta ^\prime m_{\tilde{x}}^\prime})_{\tilde{b}}$""", r'$\widehat{X^H}$', r'$\widehat{X^V}$' ] for i, p in enumerate(plotterms): axc = ax.ravel()[i] im = m6plot( (X, Y, P[:, :, p]), axc, vmax=cmax, vmin=-cmax, ptype='imshow', txt=lab[p], ylim=(-2000, 0), cmap='RdBu_r', cbar=False) if fil3: cs = axc.contour( X, Y, swash, np.array([swashperc]), colors='grey', linewidths=4) cs = axc.contour( X, Y, P[:, :, p], levels=[-2e-5, -1e-5, 1e-5, 2e-5], colors='k', linestyles='dashed') cs.clabel(inline=True, fmt="%.0e") cs1 = axc.plot(X, em[::4, :].T, 'k') if i % 2 == 0: axc.set_ylabel('z (m)') if i > np.size(ax) - 3: xdegtokm(axc, 0.5 * (ystart + yend)) fig.tight_layout() cb = fig.colorbar(im, ax=ax.ravel().tolist()) cb.formatter.set_powerlimits((0, 0)) cb.update_ticks() if savfil: plt.savefig( savfil + '.eps', dpi=300, facecolor='w', edgecolor='w', format='eps', transparent=False, bbox_inches='tight') else: plt.show() im = m6plot( (X, Y, np.sum(P, axis=2)), vmax=cmax, vmin=-cmax, ptype='imshow', cmap='RdBu_r', ylim=(-2500, 0)) if savfil: plt.savefig( savfil + 'res.eps', dpi=300, facecolor='w', edgecolor='w', format='eps', transparent=False, bbox_inches='tight') else: plt.show() Pep = np.ma.masked_array(Pep, mask=np.isnan(Pep)).squeeze() cmax = np.nanpercentile(Pep, [cmaxpercfactorforep, 100 - cmaxpercfactorforep]) cmax = np.max(np.fabs(cmax)) lab = [ r'$-\frac{(\overline{huu})_{\tilde{x}}}{\overline{h}}$', r'$\frac{\hat{u}(\overline{hu})_{\tilde{x}}}{\overline{h}}$', r"""$-\frac{1}{2\overline{h}}\overline{\zeta ^{\prime 2}}_{\tilde{x}}$""", r'$-\frac{(\overline{huv})_{\tilde{y}}}{\overline{h}}$', r'$\frac{\hat{u}(\overline{hv})_{\tilde{y}}}{\overline{h}}$', r'$-\frac{(\overline{hu\varpi})_{\tilde{b}}}{\overline{h}}$', r'$\frac{\hat{u}(\overline{h\varpi})_{\tilde{b}}}{\overline{h}}$', r"""$-\frac{(\overline{\zeta ^\prime m_{\tilde{x}}^\prime})_{\tilde{b}}}{\overline{h}}$""" ] fig, ax = plt.subplots( np.int8(np.ceil(Pep.shape[-1] / 2)), 2, sharex=True, sharey=True, figsize=(12, 9)) for i in range(Pep.shape[-1]): axc = ax.ravel()[i] im = m6plot( (X, Y, Pep[:, :, i]), axc, vmax=cmax, vmin=-cmax, ptype='imshow', txt=lab[i], cmap='RdBu_r', ylim=(-2500, 0), cbar=False) if fil3: cs = axc.contour(X, Y, swash, np.array([swashperc]), colors='k') if i % 2 == 0: axc.set_ylabel('z (m)') if i > np.size(ax) - 3: xdegtokm(axc, 0.5 * (ystart + yend)) fig.tight_layout() cb = fig.colorbar(im, ax=ax.ravel().tolist()) cb.formatter.set_powerlimits((0, 0)) cb.update_ticks() if savfilep: plt.savefig( savfilep + '.eps', dpi=300, facecolor='w', edgecolor='w', format='eps', transparent=False, bbox_inches='tight') else: plt.show()
gpl-3.0
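The budget terms in the script above are built from thickness-weighted time averages: transports such as uh are averaged with the record length average_DT as the weight, divided by the similarly averaged layer thickness, and layers thinner than htol are masked out. Below is a small numpy sketch of that weighting pattern on synthetic (time, z, y, x) arrays; all names and shapes here are made up:

import numpy as np

rng = np.random.default_rng(0)
uh = rng.normal(size=(5, 3, 4, 4))               # stand-in for uh (transport)
h = np.abs(rng.normal(size=(5, 3, 4, 4))) + 0.1  # stand-in for h_Cu (thickness)
dt = rng.uniform(1.0, 2.0, size=(5, 1, 1, 1))    # stand-in for average_DT

# Weighted time mean with a kept singleton time axis, mirroring the
# (field * dt).sum(axis=0, keepdims=True) / np.sum(dt) pattern used above.
uh_bar = (uh * dt).sum(axis=0, keepdims=True) / dt.sum()
h_bar = (h * dt).sum(axis=0, keepdims=True) / dt.sum()

htol = 1e-3
h_bar = np.where(h_bar < htol, np.nan, h_bar)    # mask vanishing layers

u_hat = uh_bar / h_bar   # thickness-weighted velocity (before any grid metric)
print(u_hat.shape)       # (1, 3, 4, 4)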
thunderhoser/GewitterGefahr
gewittergefahr/gg_utils/storm_tracking_eval_test.py
1
4622
"""Unit tests for storm_tracking_eval.py.""" import copy import unittest import numpy import pandas from gewittergefahr.gg_utils import storm_tracking_eval as tracking_eval from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils TOLERANCE = 1e-6 # The following constants are used to test _fit_theil_sen_one_track. VALID_TIMES_ONE_TRACK_UNIX_SEC = numpy.array( [0, 1, 2, 2, 3, 4, 5, 6, 7, 7], dtype=int ) X_COORDS_ONE_TRACK_METRES = numpy.array([ -10, -7.5, -5, -5, -2.5, 0, 2.5, 5, 7.5, 7.5 ]) Y_COORDS_ONE_TRACK_METRES = numpy.array([ 5, 1.4, -2.2, -2.2, -5.8, -9.4, -13, -16.6, -20.2, -20.2 ]) X_INTERCEPT_ONE_TRACK_METRES = -10. X_VELOCITY_ONE_TRACK_M_S01 = 2.5 Y_INTERCEPT_ONE_TRACK_METRES = 5. Y_VELOCITY_ONE_TRACK_M_S01 = -3.6 # The following constants are used to test _apply_theil_sen_one_track. STORM_TRACK_DICT = { tracking_eval.X_INTERCEPT_COLUMN: [X_INTERCEPT_ONE_TRACK_METRES], tracking_eval.X_VELOCITY_COLUMN: [X_VELOCITY_ONE_TRACK_M_S01], tracking_eval.Y_INTERCEPT_COLUMN: [Y_INTERCEPT_ONE_TRACK_METRES], tracking_eval.Y_VELOCITY_COLUMN: [Y_VELOCITY_ONE_TRACK_M_S01] } STORM_TRACK_TABLE = pandas.DataFrame.from_dict(STORM_TRACK_DICT) NESTED_ARRAY = STORM_TRACK_TABLE[[ tracking_eval.X_INTERCEPT_COLUMN, tracking_eval.X_INTERCEPT_COLUMN ]].values.tolist() STORM_TRACK_TABLE = STORM_TRACK_TABLE.assign(**{ tracking_utils.TRACK_TIMES_COLUMN: NESTED_ARRAY, tracking_utils.TRACK_X_COORDS_COLUMN: NESTED_ARRAY, tracking_utils.TRACK_Y_COORDS_COLUMN: NESTED_ARRAY }) THESE_TIMES_UNIX_SEC = numpy.array([10, 10, 15, 15, 20, 25, 30, 40], dtype=int) ACTUAL_X_COORDS_METRES = numpy.array([12, 19, 22, 33, 38, 50.5, 63, 86]) ACTUAL_Y_COORDS_METRES = numpy.array([ -38, -25, -60, -45, -66.6, -88, -102, -130 ]) EXPECTED_X_COORDS_METRES = numpy.array([15, 15, 27.5, 27.5, 40, 52.5, 65, 90]) EXPECTED_Y_COORDS_METRES = numpy.array( [-31, -31, -49, -49, -67, -85, -103, -139], dtype=float ) STORM_TRACK_TABLE[tracking_utils.TRACK_TIMES_COLUMN].values[0] = ( THESE_TIMES_UNIX_SEC ) STORM_TRACK_TABLE[tracking_utils.TRACK_X_COORDS_COLUMN].values[0] = ( ACTUAL_X_COORDS_METRES ) STORM_TRACK_TABLE[tracking_utils.TRACK_Y_COORDS_COLUMN].values[0] = ( ACTUAL_Y_COORDS_METRES ) # The following constants are used to test _get_mean_ts_error_one_track. 
RMSE_METRES = numpy.sqrt(numpy.mean( (EXPECTED_X_COORDS_METRES - ACTUAL_X_COORDS_METRES) ** 2 + (EXPECTED_Y_COORDS_METRES - ACTUAL_Y_COORDS_METRES) ** 2 )) class StormTrackingEvalTests(unittest.TestCase): """Each method is a unit test for storm_tracking_eval.py.""" def test_fit_theil_sen_one_track(self): """Ensures correct output from _fit_theil_sen_one_track.""" this_dict = tracking_eval._fit_theil_sen_one_track( x_coords_metres=X_COORDS_ONE_TRACK_METRES, y_coords_metres=Y_COORDS_ONE_TRACK_METRES, valid_times_unix_sec=VALID_TIMES_ONE_TRACK_UNIX_SEC) self.assertTrue(numpy.isclose( this_dict[tracking_eval.X_INTERCEPT_KEY], X_INTERCEPT_ONE_TRACK_METRES, atol=TOLERANCE )) self.assertTrue(numpy.isclose( this_dict[tracking_eval.X_VELOCITY_KEY], X_VELOCITY_ONE_TRACK_M_S01, atol=TOLERANCE )) self.assertTrue(numpy.isclose( this_dict[tracking_eval.Y_INTERCEPT_KEY], Y_INTERCEPT_ONE_TRACK_METRES, atol=TOLERANCE )) self.assertTrue(numpy.isclose( this_dict[tracking_eval.Y_VELOCITY_KEY], Y_VELOCITY_ONE_TRACK_M_S01, atol=TOLERANCE )) def test_apply_theil_sen_one_track(self): """Ensures correct output from _apply_theil_sen_one_track.""" these_x_coords_metres, these_y_coords_metres = ( tracking_eval._apply_theil_sen_one_track( storm_track_table=copy.deepcopy(STORM_TRACK_TABLE), row_index=0) ) self.assertTrue(numpy.allclose( these_x_coords_metres, EXPECTED_X_COORDS_METRES, atol=TOLERANCE )) self.assertTrue(numpy.allclose( these_y_coords_metres, EXPECTED_Y_COORDS_METRES, atol=TOLERANCE )) def test_get_mean_ts_error_one_track(self): """Ensures correct output from _get_mean_ts_error_one_track.""" this_rmse_metres = tracking_eval._get_mean_ts_error_one_track( storm_track_table=copy.deepcopy(STORM_TRACK_TABLE), row_index=0 ) self.assertTrue(numpy.isclose( RMSE_METRES, this_rmse_metres, atol=TOLERANCE )) if __name__ == '__main__': unittest.main()
mit
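The one-track constants above (x-intercept -10 m, x-velocity 2.5 m/s, y-intercept 5 m, y-velocity -3.6 m/s) can be cross-checked with SciPy's Theil-Sen estimator; this is only an independent sanity check, not necessarily how storm_tracking_eval fits the track internally:

import numpy as np
from scipy.stats import theilslopes

times_sec = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 7], dtype=float)
x_metres = np.array([-10, -7.5, -5, -5, -2.5, 0, 2.5, 5, 7.5, 7.5])
y_metres = np.array([5, 1.4, -2.2, -2.2, -5.8, -9.4, -13, -16.6, -20.2, -20.2])

x_velocity, x_intercept = theilslopes(x_metres, times_sec)[:2]
y_velocity, y_intercept = theilslopes(y_metres, times_sec)[:2]

print(x_velocity, x_intercept)   # 2.5 m/s, -10 m
print(y_velocity, y_intercept)   # -3.6 m/s, 5 m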
ajZiiiN/honest-blackops
src/return_ml_cron.py
1
5638
import lib.dbUtil as dbUtil import lib.queries as qUtil import pandas as pd import numpy as np from datetime import datetime, date, time, timedelta import os from sklearn import metrics from sklearn.model_selection import train_test_split import pickle DATA_DIR = "../data/raw" PRED_DIR = "../data/pred_ml" def scaleRange(X, R = (0,1)): from sklearn.preprocessing import MinMaxScaler oldRange = [np.amin(X), np.amax(X)] scaler = MinMaxScaler(feature_range = R) return scaler.fit_transform(X), oldRange # dataset-1: queries.getInstallDayGameDataW0ReturnQ def getInstallDataset(startDate, save = True, loadExisting = False): if loadExisting == True: return pd.read_csv(os.path.join(DATA_DIR, str(startDate) + ".csv")) data = [] cols = ['pid', 'chaal_by_blind', 'avg_win_by_boot', 'avg_loss_by_boot', 'times_loaded', 'isReturnW0'] q = qUtil.getInstallDayGameDataW0ReturnQ.replace('${date}$', str(startDate)) count = 0 result = dbUtil.runQuery(q) count = count+1 total = result.rowcount row = result.fetchone() while row != None: rowData = (row[0], float(row[1]), float(row[2]), float(row[3]), int(row[4]), int(row[5])) data.append(rowData) print(("Done: ", count, total)) count = count+1 row = result.fetchone() df = pd.DataFrame(data, columns = cols) if save == True: df.to_csv(os.path.join(DATA_DIR, str(startDate) + ".csv")) return df # dataset-2: queries.getInstallDayGameDataW0Return_2_Q def getInstallDataset_2(startDate , save = True, loadExisting = False): if loadExisting == True and os.path.exists(os.path.join(DATA_DIR, str(startDate) + ".csv")): return pd.read_csv(os.path.join(DATA_DIR, str(startDate) + ".csv")) data = [] cols = ['pid', 'chaal_by_blind', 'avg_win_by_boot', 'avg_loss_by_boot', 'games_played','times_loaded', 'isReturnW0'] q = qUtil.getInstallDayGameDataW0Return_2_Q.replace('${date}$', str(startDate)) count = 0 result = dbUtil.runQuery(q) count = count+1 total = result.rowcount row = result.fetchone() while row != None: rowData = (row[0], float(row[1]), float(row[2]), float(row[3]), int(row[4]) ,int(row[5]), int(row[6])) data.append(rowData) print(("Done: ", count, total)) count = count+1 row = result.fetchone() df = pd.DataFrame(data, columns = cols) if save == True: df.to_csv(os.path.join(DATA_DIR, str(startDate) + ".csv")) return df # adding model creation def trainModel(X_train, y_train, clf): return clf.fit(X_train, y_train) def evalModel(X_test, y_test, clf): y_pred = clf.predict(X_test) print("Accuracy Score: ", metrics.accuracy_score(y_test, y_pred)) print("F1 score: ", metrics.f1_score(y_test, y_pred, average='binary')) return y_pred def splitTrainTestData(df): y = df.pop('isReturnW0') pids = df.pop('pid') X = df[['chaal_by_blind', 'avg_win_by_boot', 'avg_loss_by_boot', 'games_played','times_loaded']] X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0, shuffle = False) return X_train,X_test,y_train,y_test, pids def splitTrainTestDataNorm(df): y = df.pop('isReturnW0') #X = df[['chaal_by_blind', 'avg_win_by_boot', 'avg_loss_by_boot', 'games_played','times_loaded']] pids = df.pop('pid') # Normalizing data minMax = {} X = pd.DataFrame() X['cbb'], minMax['cbb'] = scaleRange(df['chaal_by_blind']) X['awd'], minMax['awb'] = scaleRange(df['avg_win_by_boot']) X['alb'], minMax['alb'] = scaleRange(df['avg_loss_by_boot']) X['gp'], minMax['gp'] = scaleRange(df['games_played']) X['tl'], minMax['tl'] = scaleRange(df['times_loaded']) X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0, shuffle = False) return X_train,X_test,y_train,y_test , pids, minMax def 
writeClassifierToFile(clf, filename): from sklearn.externals import joblib pickle_path = os.path.join("../data/ml_model",filename) joblib.dump(clf, pickle_path) def readClassifierFromFile(filename): from sklearn.externals import joblib pickle_path = os.path.join('../data/ml_model',filename) clf = joblib.load(pickle_path) return clf def doPrediction(curDate): df = getInstallDataset_2(curDate, loadExisting=False) X_train,X_test,y_train,y_test, pids, = splitTrainTestData(df) print(X_train.shape,y_train.shape, X_test.shape, y_test.shape) clf = readClassifierFromFile( "W1_return.pkl") y_val_pred2 = evalModel(X_train, y_train, clf) return pids, y_train, y_val_pred2 def getApiPrediction(pid, date_str): import requests URL = "http://172.31.0.81:9090/ret" PARAMS = {'date': date_str, 'pid': pid} r = requests.get(url = URL, params = PARAMS) print(r.json()) return r.json()['isReturn'] def main(): # runs cron for yesterday yest = date(2017, 10, 14) #yest = datetime.now().date() - timedelta(days = 1) pids, y_train, y_pred = doPrediction(yest) print("Got dataset for : ", str(yest)) df_predicts = pd.DataFrame() df_predicts['pids'] = pids df_predicts['ret_actual']= y_train print("Got pids and actual return: ", df_predicts.head()) df_predicts['ret_pred'] = y_pred print("Got Home prediction: ", df_predicts.head()) pred = {} for index, row in df_predicts.iterrows(): pred[str(row['pids'])] = row['ret_pred'] with open(os.path.join(PRED_DIR, str(yest) + ".pkl"), "wb") as pf: pickle.dump(pred, pf, 1) print("Done Prediction for : ", str(yest)) if __name__ == "__main__": main()
mit
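doPrediction above reads a pre-trained model from ../data/ml_model/W1_return.pkl, but the module never shows how that file is produced. The following training-side companion is a sketch under assumptions: the RandomForest model family and the install date are my choices, and running it requires the same lib/ database environment as the cron itself:

from sklearn.ensemble import RandomForestClassifier

import return_ml_cron as cron   # the module above (src/return_ml_cron.py)

df = cron.getInstallDataset_2('2017-10-01', loadExisting=True)
X_train, X_test, y_train, y_test, pids = cron.splitTrainTestData(df)

clf = cron.trainModel(X_train, y_train,
                      RandomForestClassifier(n_estimators=100, random_state=0))
cron.evalModel(X_train, y_train, clf)             # in-sample accuracy / F1 only
cron.writeClassifierToFile(clf, "W1_return.pkl")  # later loaded by doPrediction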
jcirni/grit
grit.py
1
5623
import pandas as pd import argparse import os.path import re import datetime from datetime pd.set_option('display.width', 120) #catch bad file path def valid_file(arg): if not os.path.isfile(arg): parser.error("File does not exist!") else: return arg #standard outputs for data validation def err_data(value, row, col): log = open('error.log', 'a') log.write(str(datetime.now()) + ": Your value, " + value + ", at row %d and col %d does not contain appropriate data\n" % (row, col)) #test chrom data def valid_chrom(chrom): try: return chrom[0:3] == 'chr' and int(chrom[3:]) < 23 except ValueError: return False #test start position def valid_start_position(value): return 0 < value and value < (2 ** 32 + 1) #test end position def valid_end_position(end, start): return 0 < end and end < (2 ** 32 + 1) and start < end #test feature name def valid_feature_name(feature): return re.match("^[\w_()-]*$", feature) #validate data set def valid_data(data, length): '''iterate through columns of dataset, log errors''' chromErrs, startErrs, endErrs, featErrs, strandErrs = 0,0,0,0,0 i = 0 print "Data Validation Results:" while i < length: #validate chrom field if valid_chrom(data.iloc[i][0]): #cut chr prefix and update value data.set_value(i,0, (data.iloc[i][0])[3:], takeable=True) else: err_data(data.iloc[i][0],i,0) chromErrs += 1 #validate start position if not valid_start_position(data.iloc[i][1]): err_data(data.iloc[i][1],i,1) startErrs += 1 #validate end position if not valid_end_position(data.iloc[i][2], data.iloc[i][1]): err_data(data.iloc[i][2],i,2) endErrs += 1 #validate feature name if not valid_feature_name(data.iloc[i][3]): err_data(data.iloc[i][3],i,3) featErrs += 1 #validate strand if not (data.iloc[i][4] == '-' or data.iloc[i][4] == '+'): err_data(data.iloc[i][4],i,4) strandErrs += 1 i += 1 print "chrom field reported %d errors!" % (chromErrs) print "start position field reported %d errors!" % (startErrs) print "end position field reported %d errors!" % (endErrs) print "feature name field reported %d errors!" % (featErrs) print "strand field reported %d errors!" % (strandErrs) return chromErrs + startErrs + endErrs + featErrs + strandErrs == 0 def get_user_chrom(): opt = raw_input("Enter chromosome value (format chr##): ") if valid_chrom(opt): return int(opt[3:]) else: print "invalid input, please try again" get_user_chrom() def get_user_pos(): opt = raw_input("OPTIONAL: Enter position value. Must be a number 1 - 2^32. Or enter no: ") if opt == 'no' or opt =='No' or opt == 'n': return None else: try: opt = int(opt) if valid_start_position(opt): return opt except ValueError: print "invalid input, value must be 1 - 2^32. 
Please try again" get_user_pos() def get_user_feature(): opt = raw_input("Enter feature name: ") if valid_feature_name(opt): return opt else: print "invalid input, please try again" get_user_feature() #queries dataset for chromosome def search_for_chrom(data, chrom, pos): if pos == None: return data.loc[data['chromosome'] == chrom] else: return data.loc[(data['chromosome'] == chrom) & (data['start_pos'] < pos) & (data['end_pos'] > pos)] #queries dataset for feature def search_for_feature(data, feature): return data.loc[data['feature_name'] == feature] #summary metrix def get_metrics(): print print "================== Your Metrics ======================" print #count unique features per chromosome by chromosome and starting position print "Features in data set: %d" % len(df.groupby(['chromosome', 'start_pos']).size().index) #new length column df['length'] = df['end_pos'] - df['start_pos'] #min print "The smallest length is: %d" % df['length'].min() #max print "The largest length is: %d" % df['length'].max() #mean print "The average length is: %d" % df['length'].mean() #count unique features per strand print print "==== Features per Strand ====" print print df.groupby(['chromosome', 'strand']).size() #get user's next action def get_user_input(): opta = raw_input("Query data, get stats, or exit? (q/s/x): ") if opta == 'q': optb = raw_input("Search by pos or feat? (pos/feat): ") if optb == 'pos': print search_for_chrom(df, get_user_chrom(), get_user_pos()) get_user_input() elif optb == 'feat': print search_for_feature(df, get_user_feature()) get_user_input() else: print "invalid input please try again (pos/feat): " get_user_input() elif opta == 's': get_metrics() get_user_input() elif opta == 'x': print "All done." exit() else: print "invalid input, please try again" get_user_input() #command line arguments parser = argparse.ArgumentParser(description='Parses genetic annotation data and allows querying with command line arguments. Requires file (-f /path/to/file)') parser.add_argument('-f', '--file', type=str, help='path to file') #parse arguments args = parser.parse_args() #check for valid file and read file if args.file: print "Validating data now!" df = pd.read_csv(valid_file(args.file), '\t' , header=None) df.columns = ['chromosome', 'start_pos', 'end_pos', 'feature_name', 'strand'] print if valid_data(df, len(df.index)): print "Data validation complete!" #now that data is good, strip chr prefix from chrom column df[['chromosome']] = df[['chromosome']].apply(pd.to_numeric) print df #what does user want to do now? get_user_input() else: print print 'Please make corrections to your data sample' else: print parser.description
mit
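grit.py expects a headerless, tab-separated file with columns chromosome, start_pos, end_pos, feature_name, and strand. Below is a hypothetical minimal input that passes every validator, plus the matching invocation; the file name is made up:

rows = [
    ("chr1", 100, 500, "gene_A", "+"),
    ("chr2", 2000, 2600, "gene_B", "-"),
    ("chr10", 42, 84, "promoter_(X)-1", "+"),
]
with open("toy_annotations.tsv", "w") as fh:
    for chrom, start, end, name, strand in rows:
        fh.write("\t".join(map(str, (chrom, start, end, name, strand))) + "\n")

# Then, from a shell:
#   python grit.py -f toy_annotations.tsv
# The script checks each field (a chr1..chr22 prefix, positions in 1..2^32 with
# start < end, feature names matching [\w_()-], and a +/- strand) before
# dropping into its interactive query loop.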
yeahkun/tushare
tushare/stock/fundamental.py
4
14920
# -*- coding:utf-8 -*-
"""
Fundamentals data interface
Created on 2015/01/18
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
import pandas as pd
from tushare.stock import cons as ct
import lxml.html
from lxml import etree
import re
from pandas.compat import StringIO
try:
    from urllib.request import urlopen, Request
except ImportError:
    from urllib2 import urlopen, Request


def get_stock_basics():
    """
    Get basic information on all companies listed in Shanghai and Shenzhen.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        industry, sub-industry
        area, region
        pe, price/earnings ratio
        outstanding, tradable share capital
        totals, total share capital (10k shares)
        totalAssets, total assets (10k)
        liquidAssets, current assets
        fixedAssets, fixed assets
        reserved, capital reserve
        reservedPerShare, capital reserve per share
        eps, earnings per share
        bvps, book value per share
        pb, price/book ratio
        timeToMarket, listing date
    """
    request = Request(ct.ALL_STOCK_BASICS_FILE)
    text = urlopen(request, timeout=10).read()
    text = text.decode('GBK')
    df = pd.read_csv(StringIO(text), dtype={'code': 'object'})
    df = df.set_index('code')
    return df


def get_report_data(year, quarter):
    """
    Get earnings report data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        eps, earnings per share
        eps_yoy, EPS year-on-year growth (%)
        bvps, book value per share
        roe, return on equity (%)
        epcf, cash flow per share (CNY)
        net_profits, net profit (10k CNY)
        profits_yoy, net profit year-on-year growth (%)
        distrib, profit distribution plan
        report_date, release date
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        df = _get_report_data(year, quarter, 1, pd.DataFrame())
        if df is not None:
            df = df.drop_duplicates('code')
            df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df


def _get_report_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.REPORT_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                           ct.PAGES['fd'], year, quarter, pageNo,
                                           ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df = df.drop(11, axis=1)
        df.columns = ct.REPORT_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_report_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)


def get_profit_data(year, quarter):
    """
    Get profitability data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        roe, return on equity (%)
        net_profit_ratio, net profit margin (%)
        gross_profit_rate, gross profit margin (%)
        net_profits, net profit (10k CNY)
        eps, earnings per share
        business_income, operating revenue (million CNY)
        bips, main business revenue per share (CNY)
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_profit_data(year, quarter, 1, pd.DataFrame())
        if data is not None:
            data = data.drop_duplicates('code')
            data['code'] = data['code'].map(lambda x: str(x).zfill(6))
        return data


def _get_profit_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.PROFIT_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                           ct.PAGES['fd'], year, quarter, pageNo,
                                           ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df.columns = ct.PROFIT_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_profit_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        # The original used a bare "except: pass", silently swallowing all
        # errors; report the failure like the sibling helpers do.
        print(e)


def get_operation_data(year, quarter):
    """
    Get operating capability data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        arturnover, accounts receivable turnover (times)
        arturndays, accounts receivable turnover (days)
        inventory_turnover, inventory turnover (times)
        inventory_days, inventory turnover (days)
        currentasset_turnover, current asset turnover (times)
        currentasset_days, current asset turnover (days)
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_operation_data(year, quarter, 1, pd.DataFrame())
        if data is not None:
            data = data.drop_duplicates('code')
            data['code'] = data['code'].map(lambda x: str(x).zfill(6))
        return data


def _get_operation_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.OPERATION_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                              ct.PAGES['fd'], year, quarter, pageNo,
                                              ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df.columns = ct.OPERATION_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_operation_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)


def get_growth_data(year, quarter):
    """
    Get growth capability data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        mbrg, main business revenue growth rate (%)
        nprg, net profit growth rate (%)
        nav, net asset growth rate
        targ, total asset growth rate
        epsg, EPS growth rate
        seg, shareholders' equity growth rate
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        data = _get_growth_data(year, quarter, 1, pd.DataFrame())
        if data is not None:
            data = data.drop_duplicates('code')
            data['code'] = data['code'].map(lambda x: str(x).zfill(6))
        return data


def _get_growth_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.GROWTH_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                           ct.PAGES['fd'], year, quarter, pageNo,
                                           ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df.columns = ct.GROWTH_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_growth_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)


def get_debtpaying_data(year, quarter):
    """
    Get solvency (debt-paying ability) data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        currentratio, current ratio
        quickratio, quick ratio
        cashratio, cash ratio
        icratio, interest coverage ratio
        sheqratio, shareholders' equity ratio
        adratio, shareholders' equity growth rate
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        df = _get_debtpaying_data(year, quarter, 1, pd.DataFrame())
        if df is not None:
            df = df.drop_duplicates('code')
            df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df


def _get_debtpaying_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.DEBTPAYING_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                               ct.PAGES['fd'], year, quarter, pageNo,
                                               ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df.columns = ct.DEBTPAYING_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_debtpaying_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)


def get_cashflow_data(year, quarter):
    """
    Get cash flow data.

    Parameters
    --------
    year : int
        year, e.g. 2014
    quarter : int
        quarter, must be one of 1, 2, 3, 4
    Note: the data is scraped from the source website page by page, so the
    speed depends on your current network connection.

    Return
    --------
    DataFrame
        code, stock code
        name, company name
        cf_sales, net operating cash flow to sales revenue
        rateofreturn, operating cash flow return on assets
        cf_nm, net operating cash flow to net profit
        cf_liabilities, net operating cash flow to liabilities
        cashflowratio, cash flow ratio
    """
    if ct._check_input(year, quarter) is True:
        ct._write_head()
        df = _get_cashflow_data(year, quarter, 1, pd.DataFrame())
        if df is not None:
            df = df.drop_duplicates('code')
            df['code'] = df['code'].map(lambda x: str(x).zfill(6))
        return df


def _get_cashflow_data(year, quarter, pageNo, dataArr):
    ct._write_console()
    try:
        request = Request(ct.CASHFLOW_URL % (ct.P_TYPE['http'], ct.DOMAINS['vsf'],
                                             ct.PAGES['fd'], year, quarter, pageNo,
                                             ct.PAGE_NUM[1]))
        text = urlopen(request, timeout=10).read()
        text = text.decode('GBK')
        html = lxml.html.parse(StringIO(text))
        res = html.xpath("//table[@class=\"list_table\"]/tr")
        if ct.PY3:
            sarr = [etree.tostring(node).decode('utf-8') for node in res]
        else:
            sarr = [etree.tostring(node) for node in res]
        sarr = ''.join(sarr)
        sarr = '<table>%s</table>' % sarr
        df = pd.read_html(sarr)[0]
        df.columns = ct.CASHFLOW_COLS
        dataArr = dataArr.append(df, ignore_index=True)
        nextPage = html.xpath('//div[@class=\"pages\"]/a[last()]/@onclick')
        if len(nextPage) > 0:
            pageNo = re.findall(r'\d+', nextPage[0])[0]
            return _get_cashflow_data(year, quarter, pageNo, dataArr)
        else:
            return dataArr
    except Exception as e:
        print(e)


def _data_path():
    import os
    import inspect
    caller_file = inspect.stack()[1][1]
    pardir = os.path.abspath(os.path.join(os.path.dirname(caller_file), os.path.pardir))
    return os.path.abspath(os.path.join(pardir, os.path.pardir))
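A minimal usage sketch for the module above (not part of the original file): it assumes a working network connection and an older tushare release that re-exports these functions at the package top level; the joined columns are purely illustrative.

import tushare as ts

# One row per listed company, indexed by 6-digit stock code.
basics = ts.get_stock_basics()

# Q3 2014 earnings reports, scraped page by page from the source site.
report = ts.get_report_data(2014, 3)

# Attach industry/region metadata to the report rows; the suffix resolves the
# 'name' column that appears in both frames.
merged = report.set_index('code').join(
    basics[['name', 'industry', 'area']], rsuffix='_basics')
print(merged.head())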
bsd-3-clause
vhaasteren/scipy
scipy/stats/_multivariate.py
17
69089
# # Author: Joris Vankerschaver 2013 # from __future__ import division, print_function, absolute_import import numpy as np import scipy.linalg from scipy.misc import doccer from scipy.special import gammaln, psi, multigammaln from scipy._lib._util import check_random_state __all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart'] _LOG_2PI = np.log(2 * np.pi) _LOG_2 = np.log(2) _LOG_PI = np.log(np.pi) def _process_parameters(dim, mean, cov): """ Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix. """ # Try to infer dimensionality if dim is None: if mean is None: if cov is None: dim = 1 else: cov = np.asarray(cov, dtype=float) if cov.ndim < 2: dim = 1 else: dim = cov.shape[0] else: mean = np.asarray(mean, dtype=float) dim = mean.size else: if not np.isscalar(dim): raise ValueError("Dimension of random variable must be a scalar.") # Check input sizes and return full arrays for mean and cov if necessary if mean is None: mean = np.zeros(dim) mean = np.asarray(mean, dtype=float) if cov is None: cov = 1.0 cov = np.asarray(cov, dtype=float) if dim == 1: mean.shape = (1,) cov.shape = (1, 1) if mean.ndim != 1 or mean.shape[0] != dim: raise ValueError("Array 'mean' must be a vector of length %d." % dim) if cov.ndim == 0: cov = cov * np.eye(dim) elif cov.ndim == 1: cov = np.diag(cov) elif cov.ndim == 2 and cov.shape != (dim, dim): rows, cols = cov.shape if rows != cols: msg = ("Array 'cov' must be square if it is two dimensional," " but cov.shape = %s." % str(cov.shape)) else: msg = ("Dimension mismatch: array 'cov' is of shape %s," " but 'mean' is a vector of length %d.") msg = msg % (str(cov.shape), len(mean)) raise ValueError(msg) elif cov.ndim > 2: raise ValueError("Array 'cov' must be at most two-dimensional," " but cov.ndim = %d" % cov.ndim) return dim, mean, cov def _process_quantiles(x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x[np.newaxis] elif x.ndim == 1: if dim == 1: x = x[:, np.newaxis] else: x = x[np.newaxis, :] return x def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): """ Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility. """ if rcond is not None: cond = rcond if cond in [None, -1]: t = spectrum.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps eps = cond * np.max(abs(spectrum)) return eps def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. 
Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) class _PSD(object): """ Compute coordinated functions of a symmetric positive semidefinite matrix. This class addresses two issues. Firstly it allows the pseudoinverse, the logarithm of the pseudo-determinant, and the rank of the matrix to be computed using one call to eigh instead of three. Secondly it allows these functions to be computed in a way that gives mutually compatible results. All of the functions are computed with a common understanding as to which of the eigenvalues are to be considered negligibly small. The functions are designed to coordinate with scipy.linalg.pinvh() but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). Parameters ---------- M : array_like Symmetric positive semidefinite matrix (2-D). cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : bool, optional Whether the pertinent array data is taken from the lower or upper triangle of M. (Default: lower) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. allow_singular : bool, optional Whether to allow a singular matrix. (Default: True) Notes ----- The arguments are similar to those of scipy.linalg.pinvh(). """ def __init__(self, M, cond=None, rcond=None, lower=True, check_finite=True, allow_singular=True): # Compute the symmetric eigendecomposition. # Note that eigh takes care of array conversion, chkfinite, # and assertion that the matrix is square. s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) eps = _eigvalsh_to_eps(s, cond, rcond) if np.min(s) < -eps: raise ValueError('the input matrix must be positive semidefinite') d = s[s > eps] if len(d) < len(s) and not allow_singular: raise np.linalg.LinAlgError('singular matrix') s_pinv = _pinv_1d(s, eps) U = np.multiply(u, np.sqrt(s_pinv)) # Initialize the eagerly precomputed attributes. self.rank = len(d) self.U = U self.log_pdet = np.sum(np.log(d)) # Initialize an attribute to be lazily computed. self._pinv = None @property def pinv(self): if self._pinv is None: self._pinv = np.dot(self.U, self.U.T) return self._pinv _doc_default_callparams = """\ mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional Whether to allow a singular covariance matrix. (Default: False) """ _doc_callparams_note = \ """Setting the parameter `mean` to `None` is equivalent to having `mean` be the zero-vector. The parameter `cov` can be a scalar, in which case the covariance matrix is the identity times that value, a vector of diagonal entries for the covariance matrix, or a two-dimensional array_like. """ _doc_random_state = """\ random_state : None or int or np.random.RandomState instance, optional If int or RandomState, use it for drawing the random variates. If None (or np.random), the global np.random state is used. Default is None. 
""" _doc_frozen_callparams = "" _doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" docdict_params = { '_doc_default_callparams': _doc_default_callparams, '_doc_callparams_note': _doc_callparams_note, '_doc_random_state': _doc_random_state } docdict_noparams = { '_doc_default_callparams': _doc_frozen_callparams, '_doc_callparams_note': _doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class multi_rv_generic(object): """ Class which encapsulates common functionality between all multivariate distributions. """ def __init__(self, seed=None): super(multi_rv_generic, self).__init__() self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def _get_random_state(self, random_state): if random_state is not None: return check_random_state(random_state) else: return self._random_state class multi_rv_frozen(object): """ Class which encapsulates common functionality between all frozen multivariate distributions. """ @property def random_state(self): return self._dist._random_state @random_state.setter def random_state(self, seed): self._dist._random_state = check_random_state(seed) class multivariate_normal_gen(multi_rv_generic): r""" A multivariate normal random variable. The `mean` keyword specifies the mean. The `cov` keyword specifies the covariance matrix. Methods ------- ``pdf(x, mean=None, cov=1, allow_singular=False)`` Probability density function. ``logpdf(x, mean=None, cov=1, allow_singular=False)`` Log of the probability density function. ``rvs(mean=None, cov=1, size=1, random_state=None)`` Draw random samples from a multivariate normal distribution. ``entropy()`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the mean and covariance parameters, returning a "frozen" multivariate normal random variable: rv = multivariate_normal(mean=None, cov=1, allow_singular=False) - Frozen object with the same methods but holding the given mean and covariance fixed. Notes ----- %(_doc_callparams_note)s The covariance matrix `cov` must be a (symmetric) positive semi-definite matrix. The determinant and inverse of `cov` are computed as the pseudo-determinant and pseudo-inverse, respectively, so that `cov` does not need to have full rank. The probability density function for `multivariate_normal` is .. math:: f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, and :math:`k` is the dimension of the space where :math:`x` takes values. .. 
versionadded:: 0.14.0 Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import multivariate_normal >>> x = np.linspace(0, 5, 10, endpoint=False) >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) >>> fig1 = plt.figure() >>> ax = fig1.add_subplot(111) >>> ax.plot(x, y) The input quantiles can be any shape of array, as long as the last axis labels the components. This allows us for instance to display the frozen pdf for a non-isotropic random variable in 2D as follows: >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] >>> pos = np.empty(x.shape + (2,)) >>> pos[:, :, 0] = x; pos[:, :, 1] = y >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) >>> fig2 = plt.figure() >>> ax2 = fig2.add_subplot(111) >>> ax2.contourf(x, y, rv.pdf(pos)) """ def __init__(self, seed=None): super(multivariate_normal_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, docdict_params) def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. See `multivariate_normal_frozen` for more information. """ return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed) def _logpdf(self, x, mean, prec_U, log_det_cov, rank): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ dev = x - mean maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) def logpdf(self, x, mean, cov, allow_singular=False): """ Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) return _squeeze_output(out) def pdf(self, x, mean, cov, allow_singular=False): """ Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) return _squeeze_output(out) def rvs(self, mean=None, cov=1, size=1, random_state=None): """ Draw random samples from a multivariate normal distribution. Parameters ---------- %(_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). 
%(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) random_state = self._get_random_state(random_state) out = random_state.multivariate_normal(mean, cov, size) return _squeeze_output(out) def entropy(self, mean=None, cov=1): """ Compute the differential entropy of the multivariate normal. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov) return 0.5 * logdet multivariate_normal = multivariate_normal_gen() class multivariate_normal_frozen(multi_rv_frozen): def __init__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]]) """ self.dim, self.mean, self.cov = _process_parameters(None, mean, cov) self.cov_info = _PSD(self.cov, allow_singular=allow_singular) self._dist = multivariate_normal_gen(seed) def logpdf(self, x): x = _process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.mean, self.cov_info.U, self.cov_info.log_pdet, self.cov_info.rank) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.mean, self.cov, size, random_state) def entropy(self): """ Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution """ log_pdet = self.cov_info.log_pdet rank = self.cov_info.rank return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs']: method = multivariate_normal_gen.__dict__[name] method_frozen = multivariate_normal_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, docdict_params) _dirichlet_doc_default_callparams = """\ alpha : array_like The concentration parameters. The number of entries determines the dimensionality of the distribution. 
""" _dirichlet_doc_frozen_callparams = "" _dirichlet_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" dirichlet_docdict_params = { '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, '_doc_random_state': _doc_random_state } dirichlet_docdict_noparams = { '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, '_doc_random_state': _doc_random_state } def _dirichlet_check_parameters(alpha): alpha = np.asarray(alpha) if np.min(alpha) <= 0: raise ValueError("All parameters must be greater than 0") elif alpha.ndim != 1: raise ValueError("Parameter vector 'a' must be one dimensional, " + "but a.shape = %s." % str(alpha.shape)) return alpha def _dirichlet_check_input(alpha, x): x = np.asarray(x) if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: raise ValueError("Vector 'x' must have one entry less then the" + " parameter vector 'a', but alpha.shape = " + "%s and " % alpha.shape + "x.shape = %s." % x.shape) if x.shape[0] != alpha.shape[0]: xk = np.array([1 - np.sum(x, 0)]) if xk.ndim == 1: x = np.append(x, xk) elif xk.ndim == 2: x = np.vstack((x, xk)) else: raise ValueError("The input must be one dimensional or a two " "dimensional matrix containing the entries.") if np.min(x) < 0: raise ValueError("Each entry in 'x' must be greater or equal zero.") if np.max(x) > 1: raise ValueError("Each entry in 'x' must be smaller or equal one.") if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): raise ValueError("The input vector 'x' must lie within the normal " + "simplex. but sum(x)=%f." % np.sum(x, 0)) return x def _lnB(alpha): r""" Internal helper function to compute the log of the useful quotient .. math:: B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)} Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- B : scalar Helper quotient, internal use only """ return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) class dirichlet_gen(multi_rv_generic): r""" A Dirichlet random variable. The `alpha` keyword specifies the concentration parameters of the distribution. .. versionadded:: 0.15.0 Methods ------- ``pdf(x, alpha)`` Probability density function. ``logpdf(x, alpha)`` Log of the probability density function. ``rvs(alpha, size=1, random_state=None)`` Draw random samples from a Dirichlet distribution. ``mean(alpha)`` The mean of the Dirichlet distribution ``var(alpha)`` The variance of the Dirichlet distribution ``entropy(alpha)`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix concentration parameters, returning a "frozen" Dirichlet random variable: rv = dirichlet(alpha) - Frozen object with the same methods but holding the given concentration parameters fixed. Notes ----- Each :math:`\alpha` entry must be positive. The distribution has only support on the simplex defined by .. math:: \sum_{i=1}^{K} x_i \le 1 The probability density function for `dirichlet` is .. math:: f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} where .. 
math:: \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the concentration parameters and :math:`K` is the dimension of the space where :math:`x` takes values. """ def __init__(self, seed=None): super(dirichlet_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) def __call__(self, alpha, seed=None): return dirichlet_frozen(alpha, seed=seed) def _logpdf(self, x, alpha): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function %(_dirichlet_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ lnB = _lnB(alpha) return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0) def logpdf(self, x, alpha): """ Log of the Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = self._logpdf(x, alpha) return _squeeze_output(out) def pdf(self, x, alpha): """ The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray The probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = np.exp(self._logpdf(x, alpha)) return _squeeze_output(out) def mean(self, alpha): """ Compute the mean of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- mu : scalar Mean of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) out = alpha / (np.sum(alpha)) return _squeeze_output(out) def var(self, alpha): """ Compute the variance of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- v : scalar Variance of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) return out def entropy(self, alpha): """ Compute the differential entropy of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- h : scalar Entropy of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) lnB = _lnB(alpha) K = alpha.shape[0] out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( (alpha - 1) * scipy.special.psi(alpha)) return _squeeze_output(out) def rvs(self, alpha, size=1, random_state=None): """ Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. 
""" alpha = _dirichlet_check_parameters(alpha) random_state = self._get_random_state(random_state) return random_state.dirichlet(alpha, size=size) dirichlet = dirichlet_gen() class dirichlet_frozen(multi_rv_frozen): def __init__(self, alpha, seed=None): self.alpha = _dirichlet_check_parameters(alpha) self._dist = dirichlet_gen(seed) def logpdf(self, x): return self._dist.logpdf(x, self.alpha) def pdf(self, x): return self._dist.pdf(x, self.alpha) def mean(self): return self._dist.mean(self.alpha) def var(self): return self._dist.var(self.alpha) def entropy(self): return self._dist.entropy(self.alpha) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.alpha, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']: method = dirichlet_gen.__dict__[name] method_frozen = dirichlet_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, dirichlet_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) _wishart_doc_default_callparams = """\ df : int Degrees of freedom, must be greater than or equal to dimension of the scale matrix scale : array_like Symmetric positive definite scale matrix of the distribution """ _wishart_doc_callparams_note = "" _wishart_doc_frozen_callparams = "" _wishart_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" wishart_docdict_params = { '_doc_default_callparams': _wishart_doc_default_callparams, '_doc_callparams_note': _wishart_doc_callparams_note, '_doc_random_state': _doc_random_state } wishart_docdict_noparams = { '_doc_default_callparams': _wishart_doc_frozen_callparams, '_doc_callparams_note': _wishart_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class wishart_gen(multi_rv_generic): r""" A Wishart random variable. The `df` keyword specifies the degrees of freedom. The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal precision matrix (the inverse of the covariance matrix). Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from a Wishart distribution. ``entropy()`` Compute the differential entropy of the Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" Wishart random variable: rv = wishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- invwishart, chi2 Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The Wishart distribution is often denoted .. math:: W_p(\nu, \Sigma) where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the :math:`p \times p` scale matrix. 
The probability density function for `wishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} \exp\left( -tr(\Sigma^{-1} S) / 2 \right) If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). If the scale matrix is 1-dimensional and equal to one, then the Wishart distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` distribution. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import wishart, chi2 >>> x = np.linspace(1e-5, 8, 100) >>> w = wishart.pdf(x, df=3, scale=1); w[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> c = chi2.pdf(x, 3); c[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> plt.plot(x, w) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(wishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen Wishart distribution. See `wishart_frozen` for more information. """ return wishart_frozen(df, scale, seed) def _process_parameters(self, df, scale): if scale is None: scale = 1.0 scale = np.asarray(scale, dtype=float) if scale.ndim == 0: scale = scale[np.newaxis,np.newaxis] elif scale.ndim == 1: scale = np.diag(scale) elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: raise ValueError("Array 'scale' must be square if it is two" " dimensional, but scale.scale = %s." % str(scale.shape)) elif scale.ndim > 2: raise ValueError("Array 'scale' must be at most two-dimensional," " but scale.ndim = %d" % scale.ndim) dim = scale.shape[0] if df is None: df = dim elif not np.isscalar(df): raise ValueError("Degrees of freedom must be a scalar.") elif df < dim: raise ValueError("Degrees of freedom cannot be less than dimension" " of scale matrix, but df = %d" % df) return dim, df, scale def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x * np.eye(dim)[:, :, np.newaxis] if x.ndim == 1: if dim == 1: x = x[np.newaxis, np.newaxis, :] else: x = np.diag(x)[:, :, np.newaxis] elif x.ndim == 2: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square if they are two" " dimensional, but x.shape = %s." % str(x.shape)) x = x[:, :, np.newaxis] elif x.ndim == 3: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square in the first two" " dimensions if they are three dimensional" ", but x.shape = %s." % str(x.shape)) elif x.ndim > 3: raise ValueError("Quantiles must be at most two-dimensional with" " an additional dimension for multiple" "components, but x.ndim = %d" % x.ndim) # Now we have 3-dim array; should have shape [dim, dim, *] if not x.shape[0:2] == (dim, dim): raise ValueError('Quantiles have incompatible dimensions: should' ' be %s, got %s.' 
% ((dim, dim), x.shape[0:2])) return x def _process_size(self, size): size = np.asarray(size) if size.ndim == 0: size = size[np.newaxis] elif size.ndim > 1: raise ValueError('Size must be an integer or tuple of integers;' ' thus must have dimension <= 1.' ' Got size.ndim = %s' % str(tuple(size))) n = size.prod() shape = tuple(size) return n, shape def _logpdf(self, x, dim, df, scale, log_det_scale, C): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triagular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ # log determinant of x # Note: x has components along the last axis, so that x.T has # components alone the 0-th axis. Then since det(A) = det(A'), this # gives us a 1-dim vector of determinants # Retrieve tr(scale^{-1} x) log_det_x = np.zeros(x.shape[-1]) scale_inv_x = np.zeros(x.shape) tr_scale_inv_x = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): _, log_det_x[i] = self._cholesky_logdet(x[:,:,i]) scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i]) tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace() # Log PDF out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + multigammaln(0.5*df, dim))) return out def logpdf(self, x, df, scale): """ Log of the Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) # Cholesky decomposition of scale, get log(det(scale)) C, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale, C) return _squeeze_output(out) def pdf(self, x, df, scale): """ Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ return df * scale def mean(self, df, scale): """ Mean of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. 
""" if df >= dim + 1: out = (df-dim-1) * scale else: out = None return out def mode(self, df, scale): """ Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) if out is not None else out def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. """ var = scale**2 diag = scale.diagonal() # 1 x dim array var += np.outer(diag, diag) var *= df return var def var(self, df, scale): """ Variance of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) def _standard_rvs(self, n, shape, dim, df, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : np.random.RandomState instance RandomState used for drawing the random variates. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ # Random normal variates for off-diagonal elements n_tril = dim * (dim-1) // 2 covariances = random_state.normal( size=n*n_tril).reshape(shape+(n_tril,)) # Random chi-square variates for diagonal elements variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T # Create the A matri(ces) - lower triangular A = np.zeros(shape + (dim, dim)) # Input the covariances size_idx = tuple([slice(None,None,None)]*len(shape)) tril_idx = np.tril_indices(dim, k=-1) A[size_idx + tril_idx] = covariances # Input the variances diag_idx = np.diag_indices(dim) A[size_idx + diag_idx] = variances return A def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Calculate the matrices A, which are actually lower triangular # Cholesky factorizations of a matrix B such that B ~ W(df, I) A = self._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = C A A' C', where SA ~ W(df, scale) # Note: this is the product of a (lower) (lower) (lower)' (lower)' # or, denoting B = AA', it is C B C' where C is the lower # triangular Cholesky factorization of the scale matrix. # this appears to conflict with the instructions in [1]_, which # suggest that it should be D' B D where D is the lower # triangular factorization of the scale matrix. 
However, it is # meant to refer to the Bartlett (1933) representation of a # Wishart random variate as L A A' L' where L is lower triangular # so it appears that understanding D' to be upper triangular # is either a typo in or misreading of [1]_. for index in np.ndindex(shape): CA = np.dot(C, A[index]) A[index] = np.dot(CA, CA.T) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from a Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Cholesky decomposition of scale C = scipy.linalg.cholesky(scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def _entropy(self, dim, df, log_det_scale): """ Parameters ---------- dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'entropy' instead. """ return ( 0.5 * (dim+1) * log_det_scale + 0.5 * dim * (dim+1) * _LOG_2 + multigammaln(0.5*df, dim) - 0.5 * (df - dim - 1) * np.sum( [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] ) + 0.5 * df * dim ) def entropy(self, df, scale): """ Compute the differential entropy of the Wishart. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Wishart distribution Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) _, log_det_scale = self._cholesky_logdet(scale) return self._entropy(dim, df, log_det_scale) def _cholesky_logdet(self, scale): """ Compute Cholesky decomposition and determine (log(det(scale)). Parameters ---------- scale : ndarray Scale matrix. Returns ------- c_decomp : ndarray The Cholesky decomposition of `scale`. logdet : scalar The log of the determinant of `scale`. Notes ----- This computation of ``logdet`` is equivalent to ``np.linalg.slogdet(scale)``. It is ~2x faster though. """ c_decomp = scipy.linalg.cholesky(scale, lower=True) logdet = 2 * np.sum(np.log(c_decomp.diagonal())) return c_decomp, logdet wishart = wishart_gen() class wishart_frozen(multi_rv_frozen): """ Create a frozen Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" def __init__(self, df, scale, seed=None): self._dist = wishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale) self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale, self.C) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): return self._dist._entropy(self.dim, self.df, self.log_det_scale) # Set frozen generator docstrings from corresponding docstrings in # Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: method = wishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) from numpy import asarray_chkfinite, asarray from scipy.linalg.misc import LinAlgError from scipy.linalg.lapack import get_lapack_funcs def _cho_inv_batch(a, check_finite=True): """ Invert the matrices a_i, using a Cholesky factorization of A, where a_i resides in the last two dimensions of a and the other indices describe the index i. Overwrites the data in a. Parameters ---------- a : array Array of matrices to invert, where the matrices themselves are stored in the last two dimensions. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array Array of inverses of the matrices ``a_i``. See also -------- scipy.linalg.cholesky : Cholesky factorization of a matrix """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]: raise ValueError('expected square matrix in last two dimensions') potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,)) tril_idx = np.tril_indices(a.shape[-2], k=-1) triu_idx = np.triu_indices(a.shape[-2], k=1) for index in np.ndindex(a1.shape[:-2]): # Cholesky decomposition a1[index], info = potrf(a1[index], lower=True, overwrite_a=False, clean=False) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Inversion a1[index], info = potri(a1[index], lower=True, overwrite_c=False) if info > 0: raise LinAlgError("the inverse could not be computed") if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Make symmetric (dpotri only fills in the lower triangle) a1[index][triu_idx] = a1[index][tril_idx] return a1 class invwishart_gen(wishart_gen): r""" An inverse Wishart random variable. The `df` keyword specifies the degrees of freedom. 
The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal covariance matrix. Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from an inverse Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" inverse Wishart random variable: rv = invwishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- wishart Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The inverse Wishart distribution is often denoted .. math:: W_p^{-1}(\nu, \Psi) where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the :math:`p \times p` scale matrix. The probability density function for `invwishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} \exp\left( -tr(\Sigma S^{-1}) / 2 \right) If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). If the scale matrix is 1-dimensional and equal to one, then the inverse Wishart distribution :math:`W_1(\nu, 1)` collapses to the inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` and scale = :math:`\frac{1}{2}`. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import invwishart, invgamma >>> x = np.linspace(0.01, 1, 100) >>> iw = invwishart.pdf(x, df=6, scale=1) >>> iw[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> ig = invgamma.pdf(x, 6/2., scale=1./2) >>> ig[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> plt.plot(x, iw) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(invwishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen inverse Wishart distribution. See `invwishart_frozen` for more information. """ return invwishart_frozen(df, scale, seed) def _logpdf(self, x, dim, df, scale, log_det_scale): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. 
""" log_det_x = np.zeros(x.shape[-1]) #scale_x_inv = np.zeros(x.shape) x_inv = np.copy(x).T if dim > 1: _cho_inv_batch(x_inv) # works in-place else: x_inv = 1./x_inv tr_scale_x_inv = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True) log_det_x[i] = 2 * np.sum(np.log(C.diagonal())) #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace() # Log PDF out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - multigammaln(0.5*df, dim)) return out def logpdf(self, x, df, scale): """ Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) _, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale) return _squeeze_output(out) def pdf(self, x, df, scale): """ Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ if df > dim + 1: out = scale / (df - dim - 1) else: out = None return out def mean(self, df, scale): """ Mean of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) if out is not None else out def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. """ return scale / (df + dim + 1) def mode(self, df, scale): """ Mode of the inverse Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. 
""" if df > dim + 3: var = (df - dim + 1) * scale**2 diag = scale.diagonal() # 1 x dim array var += (df - dim - 1) * np.outer(diag, diag) var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) else: var = None return var def var(self, df, scale): """ Variance of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) if out is not None else out def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triagular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Get random draws A such that A ~ W(df, I) A = super(invwishart_gen, self)._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) eye = np.eye(dim) trtrs = get_lapack_funcs(('trtrs'), (A,)) for index in np.ndindex(A.shape[:-2]): # Calculate CA CA = np.dot(C, A[index]) # Get (C A)^{-1} via triangular solver if dim > 1: CA, info = trtrs(CA, eye, lower=True) if info > 0: raise LinAlgError("Singular matrix.") if info < 0: raise ValueError('Illegal value in %d-th argument of' ' internal trtrs' % -info) else: CA = 1. / CA # Get SA A[index] = np.dot(CA.T, CA) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from an inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Invert the scale eye = np.eye(dim) L, lower = scipy.linalg.cho_factor(scale, lower=True) inv_scale = scipy.linalg.cho_solve((L, lower), eye) # Cholesky decomposition of inverted scale C = scipy.linalg.cholesky(inv_scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError invwishart = invwishart_gen() class invwishart_frozen(multi_rv_frozen): def __init__(self, df, scale, seed=None): """ Create a frozen inverse Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" self._dist = invwishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale ) # Get the determinant via Cholesky factorization C, lower = scipy.linalg.cho_factor(self.scale, lower=True) self.log_det_scale = 2 * np.sum(np.log(C.diagonal())) # Get the inverse using the Cholesky factorization eye = np.eye(self.dim) self.inv_scale = scipy.linalg.cho_solve((C, lower), eye) # Get the Cholesky factorization of the inverse scale self.C = scipy.linalg.cholesky(self.inv_scale, lower=True) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError # Set frozen generator docstrings from corresponding docstrings in # inverse Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: method = invwishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
bsd-3-clause
stylianos-kampakis/scikit-learn
sklearn/decomposition/truncated_svd.py
199
7744
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck <[email protected]> # Olivier Grisel <[email protected]> # Michael Becker <[email protected]> # License: 3-clause BSD. import numpy as np import scipy.sparse as sp try: from scipy.sparse.linalg import svds except ImportError: from ..utils.arpack import svds from ..base import BaseEstimator, TransformerMixin from ..utils import check_array, as_float_array, check_random_state from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). It is very similar to PCA, but operates on sample vectors directly, instead of on a covariance matrix. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithm: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional Number of iterations for randomized SVD solver. Not used by ARPACK. random_state : int or RandomState, optional (Seed for) pseudo-random number generator. If not given, the numpy.random singleton is used. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : array, shape (n_components, n_features) explained_variance_ratio_ : array, [n_components] Percentage of variance explained by each of the selected components. explained_variance_ : array, [n_components] The variance of the training samples transformed by a projection to each component. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...] >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS 0.27930... See also -------- PCA RandomizedPCA References ---------- Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 Notes ----- SVD suffers from a problem called "sign indeterminancy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. 
To work around this, fit instances of this class to data once, then keep the instance around to do transformations. """ def __init__(self, n_components=2, algorithm="randomized", n_iter=5, random_state=None, tol=0.): self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit LSI model on training data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self def fit_transform(self, X, y=None): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = as_float_array(X, copy=False) random_state = check_random_state(self.random_state) # If sparse and not csr or csc, convert to csr if sp.issparse(X) and X.getformat() not in ["csr", "csc"]: X = X.tocsr() if self.algorithm == "arpack": U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] U, VT = svd_flip(U[:, ::-1], VT[::-1]) elif self.algorithm == "randomized": k = self.n_components n_features = X.shape[1] if k >= n_features: raise ValueError("n_components must be < n_features;" " got %d >= %d" % (k, n_features)) U, Sigma, VT = randomized_svd(X, self.n_components, n_iter=self.n_iter, random_state=random_state) else: raise ValueError("unknown algorithm %r" % self.algorithm) self.components_ = VT # Calculate explained variance & explained variance ratio X_transformed = np.dot(U, np.diag(Sigma)) self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = check_array(X, accept_sparse='csr') return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data. Returns ------- X_original : array, shape (n_samples, n_features) Note that this is always a dense array. """ X = check_array(X) return np.dot(X, self.components_)
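# --- Usage sketch (added illustration, not part of the original module) ---
# Typical LSA-style use of the TruncatedSVD estimator defined above on a
# sparse input. A random sparse matrix stands in for a tf-idf matrix here;
# scipy.sparse.random is used purely for illustration.
if __name__ == "__main__":
    from scipy.sparse import random as sparse_random

    X = sparse_random(1000, 500, density=0.01, random_state=0, format="csr")
    svd = TruncatedSVD(n_components=50, algorithm="randomized", random_state=0)
    X_reduced = svd.fit_transform(X)          # dense array, shape (1000, 50)
    print(X_reduced.shape)
    print(svd.explained_variance_ratio_.sum())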
bsd-3-clause
e-koch/mpld3
examples/mpld3_logo.py
19
3751
""" mpld3 Logo Idea =============== This example shows how mpld3 can be used to generate relatively intricate vector graphics in the browser. This is an adaptation of a logo proposal by github user debjan, in turn based on both the matplotlib and D3js logos. """ # Author: Jake VanderPlas import matplotlib.pyplot as plt from matplotlib import image, patches, colors from matplotlib.colors import colorConverter import numpy as np import mpld3 imsize = np.array([319, 217]) center = [108.5, 108.5] max_radius = 108.5 radii = np.linspace(16, max_radius, 5) angles = np.arange(0, 360, 45) fig = plt.figure(figsize=imsize / 50.) ax = fig.add_axes([0, 0, 1, 1], frameon=False, xticks=[], yticks=[]) # Create a clip path for the elements clip_path = patches.Rectangle((0, 0), imsize[0], imsize[1], transform=ax.transData) # Create the background gradient x = np.array([0, 104, 196, 300]) y = np.linspace(150, 450, 86)[:, None] c = np.cos(-np.pi / 4) s = np.sin(-np.pi / 4) X, Y = (c * x - s * y) - 116, (s * x + c * y) C = np.arange(255).reshape((3, 85)).T C = C[::-1, :] cmap = colors.LinearSegmentedColormap.from_list("mpld3", [[0.97, 0.6, 0.29], [0.97, 0.59, 0.27], [0.97, 0.58, 0.25], [0.95, 0.44, 0.34], [0.92, 0.51, 0.29], [0.68, 0.21, 0.20]]) mesh = ax.pcolormesh(X, Y, C, cmap=cmap, shading='gourand', zorder=0) mesh.set_clip_path(clip_path) # cut-off the background to form the "D" and "3" using white patches # (this could also be done with a clip path) kwargs = dict(fc='white', ec='none', zorder=1) ax.add_patch(patches.Rectangle([0, 0], center[0], imsize[1], **kwargs)) ax.add_patch(patches.Circle(center, radii[2], **kwargs)) ax.add_patch(patches.Wedge(center, 127, -90, 90, width=18.5, **kwargs)) ax.add_patch(patches.Circle((252, 66), 18, **kwargs)) ax.add_patch(patches.Rectangle([216, 48], 36, 36, **kwargs)) ax.add_patch(patches.Wedge((252, 66), 101, -90, 40.1, width=35, **kwargs)) ax.add_patch(patches.Circle((252, 151), 18, **kwargs)) ax.add_patch(patches.Rectangle([216, 133], 36, 36, **kwargs)) ax.add_patch(patches.Wedge((252, 151), 101, -40.1, 90, width=35, **kwargs)) ax.add_patch(patches.Rectangle([-200, -200], 719, 200, **kwargs)) ax.add_patch(patches.Rectangle([-200, -200], 200, 617, **kwargs)) ax.add_patch(patches.Rectangle([-200, imsize[1]], 719, 200, **kwargs)) ax.add_patch(patches.Rectangle([imsize[0], -200], 200, 617, **kwargs)) # plot circles and lines for radius in radii: ax.add_patch(patches.Circle(center, radius, lw=0.5, ec='gray', fc='none', zorder=2)) for angle in angles: dx, dy = np.sin(np.radians(angle)), np.cos(np.radians(angle)) ax.plot([max_radius * (1 - dx), max_radius * (1 + dx)], [max_radius * (1 - dy), max_radius * (1 + dy)], '-', color='gray', lw=0.5, zorder=2) # plot wedges within the graph wedges = [(98, 231, 258, '#FF6600'), (85, 170, 205, '#FFC500'), (60, 80, 103, '#7DFF78'), (96, 45, 58, '#FD7C1A'), (73, 291, 308, '#CCFF28'), (47, 146, 155, '#28FFCC'), (25, 340, 360, '#004AFF')] for (radius, theta1, theta2, color) in wedges: ax.add_patch(patches.Wedge(center, radius, theta1, theta2, fc=color, ec='black', alpha=0.6, zorder=3)) for patch in ax.patches: patch.set_clip_path(clip_path) ax.set_xlim(0, imsize[0]) ax.set_ylim(imsize[1], 0) #plt.savefig('mpld3.png') mpld3.show()
bsd-3-clause
kpolimis/paa_2017_social_media
Estimate_Facebook_Audience/pySocialWatcher/pysocialwatcher/utils.py
2
17460
# -*- coding: utf-8 -*- import json from tabulate import tabulate import pandas as pd import constants import itertools import logging import coloredlogs import time from multiprocessing import Process, Manager import numpy import requests import ast coloredlogs.install(level=logging.INFO) class RequestException(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class JsonFormatException(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class FatalException(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) def print_error_warning(error_json, params): print_warning("Facebook Error Code: " + str(error_json["error"]["code"])) print_warning("Facebook Error Message: " + str(error_json["error"]["message"])) if error_json["error"].has_key("error_user_title") and error_json["error"].has_key("error_user_msg"): print_warning("Facebook: " + str(error_json["error"]["error_user_title"]) + "\n" + str( error_json["error"]["error_user_msg"])) print_warning("Facebook Trace Id: " + str(error_json["error"]["fbtrace_id"])) print_warning("Request Params : " + str(params)) def get_dataframe_from_json_response_query_data(json_response): dataframe = pd.DataFrame() for entry in json_response["data"]: entry_details = {} for field in constants.DETAILS_FIELD_FROM_FACEBOOK_TARGETING_SEARCH: entry_details[field] = entry[field] if field in entry else None dataframe = dataframe.append(entry_details, ignore_index=True) return dataframe def handle_send_request_error(response, url, params, tryNumber): try: error_json = json.loads(response.text) if error_json["error"]["code"] == constants.API_UNKOWN_ERROR_CODE_1 or error_json["error"]["code"] == constants.API_UNKOWN_ERROR_CODE_2: print_error_warning(error_json, params) time.sleep(constants.INITIAL_TRY_SLEEP_TIME * tryNumber) return send_request(url, params, tryNumber) elif error_json["error"]["code"] == constants.INVALID_PARAMETER_ERROR and error_json["error"]["error_subcode"] == constants.FEW_USERS_IN_CUSTOM_LOCATIONS_SUBCODE_ERROR: return get_fake_response() else: logging.error("Could not handle error.") raise FatalException(str(error_json["error"])) except: raise FatalException(str(response.text)) def send_request(url, params, tryNumber = 0): tryNumber += 1 if tryNumber >= constants.MAX_NUMBER_TRY: print_warning("Maxium Number of Tries reached. 
Failing.") raise FatalException("Maximum try reached.") try: response = requests.get(url, params=params) except Exception as error: raise RequestException(error.message) if response.status_code == 200: return response else: return handle_send_request_error(response, url, params, tryNumber) def call_request_fb(row, token, account): target_request = row[constants.TARGETING_FIELD] payload = { 'currency': 'USD', 'optimize_for': "NONE", 'targeting_spec': json.dumps(target_request), 'access_token': token, } print_info("\tSending in request: {}".format(payload)) url = constants.REACHESTIMATE_URL.format(account) response = send_request(url, payload) return response.content def get_fake_response(): response = requests.models.Response() response._content = constants.FAKE_DATA_RESPONSE_CONTENT response.status_code = 200 logging.warn("Fake Response created: " + response.content) return response def trigger_facebook_call(index, row, token, account, shared_queue): try: response = call_request_fb(row, token, account) shared_queue.put((index, response)) except RequestException: print_warning("Warning Facebook Request Failed") print_warning("Row: " + str(row)) print_warning("It will try again later") shared_queue.put((index, numpy.nan)) def add_timestamp(dataframe): dataframe["timestamp"] = constants.UNIQUE_TIME_ID return dataframe def add_published_platforms(dataframe, input_json): platforms = constants.PUBLISHER_PLATFORM_DEFAULT if constants.API_PUBLISHER_PLATFORMS_FIELD in input_json: platforms = input_json[constants.API_PUBLISHER_PLATFORMS_FIELD] dataframe[constants.API_PUBLISHER_PLATFORMS_FIELD] = json.dumps(platforms) return dataframe def trigger_request_process_and_return_response(rows_to_request): process_manager = Manager() shared_queue = process_manager.Queue() shared_queue_list = [] list_process = [] # Trigger Process in rows for index, row in rows_to_request.iterrows(): token, account = get_token_and_account_number_or_wait() p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue)) list_process.append(p) # Starting process map(lambda p: p.start(), list_process) # Stop process map(lambda p: p.join(), list_process) #Check for Exception map(lambda p: check_exception(p), list_process) # Put things from shared list to normal list while shared_queue.qsize() != 0: shared_queue_list.append(shared_queue.get()) return shared_queue_list def check_exception(p): if p.exitcode != 0: raise FatalException("FatalError: Check logging for clue. 
No way to proceed from here.") def print_info(message): logging.info(message) def unstrict_literal_eval(string): try: value = ast.literal_eval(string) return value except ValueError: return string except SyntaxError: return string def load_dataframe_from_file(file_path): dataframe = pd.DataFrame.from_csv(file_path) return dataframe.applymap(unstrict_literal_eval) def save_response_in_dataframe(shared_queue_list, df): for result_tuple in shared_queue_list: result_index = result_tuple[0] result_response = result_tuple[1] df.loc[result_index, "response"] = result_response def save_skeleton_dataframe(dataframe): print_info("Saving Skeleton file: " + constants.DATAFRAME_SKELETON_FILE_NAME) dataframe.to_csv(constants.DATAFRAME_SKELETON_FILE_NAME) def save_temporary_dataframe(dataframe): print_info("Saving temporary file: " + constants.DATAFRAME_TEMPORARY_COLLECTION_FILE_NAME) dataframe.to_csv(constants.DATAFRAME_TEMPORARY_COLLECTION_FILE_NAME) def save_after_collecting_dataframe(dataframe): print_info("Saving after collecting file: " + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME) dataframe.to_csv(constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME) def save_after_collecting_dataframe_without_full_response(dataframe): dataframe = dataframe.drop('response', 1) print_dataframe(dataframe) print_info("Saving after collecting file: " + constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME_WITHOUT_FULL_RESPONSE) dataframe.to_csv(constants.DATAFRAME_AFTER_COLLECTION_FILE_NAME_WITHOUT_FULL_RESPONSE) def print_warning(message): logging.warn(message) def load_json_data_from_response(response): response_content = response.content.encode('utf-8') return json.loads(response_content) def print_dataframe(df): print tabulate(df, headers='keys', tablefmt='psql', floatfmt=".0f") def build_initial_collection_dataframe(): return pd.DataFrame(columns= constants.DATAFRAME_COLUMNS) def get_all_combinations_from_input(input_data_json): to_combine_fields = {} for field in constants.INPUT_FIELDS_TO_COMBINE: try: if isinstance(input_data_json[field], list): field_content = input_data_json[field] to_combine_fields[field] = field_content if isinstance(input_data_json[field], dict): for intra_field_key in input_data_json[field].keys(): to_combine_fields[intra_field_key] = input_data_json[field][intra_field_key] # to_combine_fields[field] = build_AND_intra_field_combinations(input_data_json[field]) except KeyError: print_warning("Field not expecified: " + field) for field in to_combine_fields.keys(): for index, value in enumerate(to_combine_fields[field]): to_combine_fields[field][index] = (field, value) all_combinations = list(itertools.product(*to_combine_fields.values())) return all_combinations def build_AND_intra_field_combinations(intra_field_data): intra_fields = [] for field in intra_field_data.values(): intra_fields.append(field) teste = list(itertools.product(*intra_fields)) import ipdb;ipdb.set_trace() pass def add_list_of_ANDS_to_input(list_of_ANDS_between_groups,input_data_json): for interests_to_AND in list_of_ANDS_between_groups: names = [] and_ors = [] for interest_to_AND in interests_to_AND: names.append(interest_to_AND[constants.INPUT_NAME_FIELD]) if "or" not in interest_to_AND: raise Exception("Only AND of ors are supported") and_ors.append(interest_to_AND["or"]) new_and_interest = { constants.INPUT_NAME_FIELD : " AND ".join(names), "and_ors" : and_ors, "isAND" : True } input_data_json[constants.INPUT_INTEREST_FIELD].append(new_and_interest) def generate_collection_request_from_combination(current_combination, 
input_data): targeting = build_targeting(current_combination,input_data) dataframe_row = {} for field in current_combination: field_name = field[0] value = field[1] dataframe_row[field_name] = value dataframe_row[constants.ALLFIELDS_FIELD] = current_combination dataframe_row[constants.TARGETING_FIELD] = targeting dataframe_row[constants.INPUT_NAME_FIELD] = input_data[constants.INPUT_NAME_FIELD] return dataframe_row def select_common_fields_in_targeting(targeting, input_combination_dictionary): # Selecting Geolocation geo_location = input_combination_dictionary[constants.INPUT_GEOLOCATION_FIELD] if geo_location.has_key(constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD): location_type = geo_location[constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD] else: location_type = constants.DEFAULT_GEOLOCATION_LOCATION_TYPE_FIELD targeting[constants.API_GEOLOCATION_FIELD] = { geo_location["name"]: geo_location["values"], constants.INPUT_GEOLOCATION_LOCATION_TYPE_FIELD: location_type } # Selecting Age age_range = input_combination_dictionary[constants.INPUT_AGE_RANGE_FIELD] targeting[constants.API_MIN_AGE_FIELD] = age_range[constants.MIN_AGE] if age_range.has_key(constants.MIN_AGE) else None targeting[constants.API_MAX_AGE_FIELD] = age_range[constants.MAX_AGE] if age_range.has_key(constants.MAX_AGE) else None # Selecting genders gender = input_combination_dictionary[constants.INPUT_GENDER_FIELD] targeting[constants.API_GENDER_FIELD] = [gender] # Selecting Languages if input_combination_dictionary.has_key(constants.INPUT_LANGUAGE_FIELD): languages = input_combination_dictionary[constants.INPUT_LANGUAGE_FIELD] if languages: targeting[constants.API_LANGUAGES_FIELD] = languages["values"] else: print_warning("No field: " + constants.INPUT_LANGUAGE_FIELD) def get_api_field_name(field_name): return constants.INPUT_TO_API_FIELD_NAME[field_name] def process_audience_from_response(literal_response): audience = json.loads(literal_response)["data"]["users"] return int(audience) def post_process_collection(collection_dataframe): # For now just capture audience print_info("Computing Audience column") collection_dataframe["audience"] = collection_dataframe["response"].apply( lambda x: process_audience_from_response(x)) return collection_dataframe def select_advance_targeting_type_array_ids(segment_type, input_value, targeting): api_field_name = get_api_field_name(segment_type) if input_value: if input_value.has_key("or"): or_query = [] for or_id in input_value["or"]: or_query.append({"id" : or_id}) targeting["flexible_spec"].append({api_field_name: or_query}) if input_value.has_key("and"): for id_and in input_value["and"]: targeting["flexible_spec"].append({segment_type: {"id" : id_and}}) if input_value.has_key("not"): if not "exclusions" in targeting: targeting["exclusions"] = {} if not api_field_name in targeting["exclusions"].keys(): targeting["exclusions"][api_field_name] = [] for id_not in input_value["not"]: targeting["exclusions"][api_field_name].append({"id" : id_not}) if input_value.has_key("and_ors"): for or_ids in input_value["and_ors"]: or_query = [] for or_id in or_ids: or_query.append({"id" : or_id}) targeting["flexible_spec"].append({segment_type: or_query}) if not input_value.has_key("or") and not input_value.has_key("and") and not input_value.has_key("not") and not input_value.has_key("and_ors"): raise JsonFormatException("Something wrong with: " + str(input_value)) def get_interests_by_group_to_AND(input_data_json, groups_ids): interests_by_group_to_AND = {} for group_id in groups_ids: 
interests_by_group_to_AND[group_id] = [] for interest_input in input_data_json[constants.INPUT_INTEREST_FIELD]: if interest_input: if constants.GROUP_ID_FIELD in interest_input: interest_group_id = interest_input[constants.GROUP_ID_FIELD] if interest_group_id in interests_by_group_to_AND: interests_by_group_to_AND[interest_group_id].append(interest_input) return interests_by_group_to_AND def select_advance_targeting_type_array_integer(segment_type, input_value, targeting): api_field_name = get_api_field_name(segment_type) if input_value: try: targeting["flexible_spec"].append({api_field_name : input_value["or"]}) except: raise JsonFormatException("Something wrong with: " + str(input_value)) def select_advance_targeting_fields(targeting, input_combination_dictionary): # Selecting Advance Targeting targeting["flexible_spec"] = [] for advance_field in constants.ADVANCE_TARGETING_FIELDS_TYPE_ARRAY_IDS: if input_combination_dictionary.has_key(advance_field): select_advance_targeting_type_array_ids(advance_field, input_combination_dictionary[advance_field], targeting) for advance_field in constants.ADVANCE_TARGETING_FIELDS_TYPE_ARRAY_INTEGER: if input_combination_dictionary.has_key(advance_field): select_advance_targeting_type_array_integer(advance_field, input_combination_dictionary[advance_field], targeting) return targeting def select_publisher_platform(targeting, input_data): # Selecting Publisher Platform platform = constants.PUBLISHER_PLATFORM_DEFAULT if constants.API_PUBLISHER_PLATFORMS_FIELD in input_data: platform = input_data[constants.API_PUBLISHER_PLATFORMS_FIELD] targeting[constants.API_PUBLISHER_PLATFORMS_FIELD] = platform def build_targeting(current_combination, input_data): targeting = {} input_combination_dictionary = dict(current_combination) select_common_fields_in_targeting(targeting, input_combination_dictionary) select_advance_targeting_fields(targeting, input_combination_dictionary) select_publisher_platform(targeting, input_data) return targeting def get_token_and_account_number_or_wait(): if not "used_tokens_time_map" in globals(): global used_tokens_time_map used_tokens_time_map = {} while True: for token, account in constants.TOKENS: if used_tokens_time_map.has_key(token): last_used_time = used_tokens_time_map[token] time_since_used = time.time() - last_used_time if time_since_used > constants.SLEEP_TIME: used_tokens_time_map[token] = time.time() return token, account else: used_tokens_time_map[token] = time.time() return token, account time.sleep(1) def print_collecting_progress(uncomplete_df, df): full_size = len(df) uncomplete_df_size = len(uncomplete_df) print_info("Collecting... Completed: {:.2f}% , {:d}/{:d}".format((float(full_size - uncomplete_df_size) / full_size * 100), full_size - uncomplete_df_size, full_size)) def send_dumb_query(token, account): try: row = pd.Series() row[constants.TARGETING_FIELD] = constants.DEFAULT_DUMB_TARGETING call_request_fb(row, token, account) except Exception as error: print_warning("Token or Account Number Error:") print_warning("Token:" + token) print_warning("Account:" + account) raise error
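# --- Added illustration (not part of pySocialWatcher) ---
# get_all_combinations_from_input above tags every value of each list-valued
# input field with its field name and then takes the cross product of those
# tagged lists. A self-contained sketch of that expansion with hypothetical
# field names (the real field names live in pysocialwatcher.constants):
if __name__ == "__main__":
    import itertools

    example_input = {
        "genders": [1, 2],
        "ages_ranges": [{"min": 18, "max": 24}, {"min": 25, "max": 34}],
        "geo_locations": [{"name": "countries", "values": ["US"]}],
    }
    tagged = [[(field, value) for value in values]
              for field, values in example_input.items()]
    for combination in itertools.product(*tagged):
        print(dict(combination))  # one targeting specification per combination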
mit
ChinaQuants/blaze
blaze/compute/tests/test_mysql_compute.py
3
1619
from __future__ import absolute_import, print_function, division

from getpass import getuser

import pytest

sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('pymysql')

from odo import odo, drop, discover
import pandas as pd

from blaze import symbol, compute
from blaze.utils import example, normalize


@pytest.yield_fixture(scope='module')
def data():
    try:
        t = odo(
            example('nyc.csv'),
            'mysql+pymysql://%s@localhost/test::nyc' % getuser()
        )
    except sa.exc.OperationalError as e:
        pytest.skip(str(e))
    else:
        try:
            yield t.bind
        finally:
            drop(t)


@pytest.fixture
def db(data):
    return symbol('test', discover(data))


def test_agg_sql(db, data):
    subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
    expr = subset[subset.passenger_count < 4].passenger_count.min()
    result = compute(expr, data)
    expected = """
    select min(alias.passenger_count) as passenger_count_min
    from (select nyc.passenger_count as passenger_count
          from nyc
          where nyc.passenger_count < %s) as alias
    """
    assert normalize(str(result)) == normalize(expected)


def test_agg_compute(db, data):
    subset = db.nyc[['pickup_datetime', 'dropoff_datetime', 'passenger_count']]
    expr = subset[subset.passenger_count < 4].passenger_count.min()
    result = compute(expr, data)
    passenger_count = odo(compute(db.nyc.passenger_count, {db: data}),
                          pd.Series)
    assert passenger_count[passenger_count < 4].min() == result.scalar()
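# --- Added illustration (not part of the blaze test suite) ---
# The SQL assertion above compares strings through blaze.utils.normalize so
# that whitespace and line breaks do not affect the comparison. A rough
# stand-in for that idea (not blaze's exact implementation) is a simple
# whitespace-collapsing helper:
def _normalize_sql_sketch(query):
    """Collapse whitespace and lowercase a SQL string for comparison."""
    import re
    return re.sub(r"\s+", " ", query.strip().lower())


assert (_normalize_sql_sketch("SELECT  min(x)\nFROM t") ==
        _normalize_sql_sketch("select min(x) from t"))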
bsd-3-clause
cimat/data-visualization-patterns
display-patterns/Hierarchies/Pruebas/A62Tree_Map_Matplotlib.py
1
1800
import pylab
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle


class Treemap:

    def __init__(self, tree, iter_method, size_method, color_method):
        self.ax = pylab.subplot(111, aspect='equal')
        pylab.subplots_adjust(left=0, right=1, top=1, bottom=0)
        self.ax.set_xticks([])
        self.ax.set_yticks([])
        self.size_method = size_method
        self.iter_method = iter_method
        self.color_method = color_method
        self.addnode(tree)

    def addnode(self, node, lower=[0, 0], upper=[1, 1], axis=0):
        axis = axis % 2
        self.draw_rectangle(lower, upper, node)
        width = upper[axis] - lower[axis]
        try:
            for child in self.iter_method(node):
                upper[axis] = lower[axis] + \
                    (width * float(size(child))) / size(node)
                self.addnode(child, list(lower), list(upper), axis + 1)
                lower[axis] = upper[axis]
        except TypeError:
            pass

    def draw_rectangle(self, lower, upper, node):
        r = Rectangle(lower, upper[0] - lower[0], upper[1] - lower[1],
                      edgecolor='k', facecolor=self.color_method(node))
        self.ax.add_patch(r)


if __name__ == '__main__':
    size_cache = {}

    def size(thing):
        if isinstance(thing, int):
            return thing
        if thing in size_cache:
            return size_cache[thing]
        else:
            size_cache[thing] = reduce(int.__add__, [size(x) for x in thing])
            return size_cache[thing]

    def random_color(thing):
        return (random.random(), random.random(), random.random())

    tree = ((2, 12), ((4, (1, 2)), (8, (1, 2))))
    Treemap(tree, iter, size, random_color)
    plt.show()
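# --- Added usage note (illustration only) ---
# The size() helper above relies on reduce(), which is a builtin on Python 2
# only; on Python 3 it must be imported from functools. A small self-contained
# sketch of the same size computation for a different, made-up tree:
if __name__ == '__main__':
    from functools import reduce as _reduce  # builtin on Python 2

    def tree_size(node):
        """Total weight of a nested tuple tree whose leaves are ints."""
        if isinstance(node, int):
            return node
        return _reduce(lambda a, b: a + b,
                       (tree_size(child) for child in node))

    demo_tree = ((3, 5), (7, (2, 4)))
    print(tree_size(demo_tree))  # 21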
cc0-1.0
ltiao/scikit-learn
sklearn/manifold/t_sne.py
52
34602
# Author: Alexander Fabisch -- <[email protected]> # Author: Christopher Moody <[email protected]> # Author: Nick Travers <[email protected]> # License: BSD 3 clause (C) 2014 # This is the exact and Barnes-Hut t-SNE implementation. There are other # modifications of the algorithm: # * Fast Optimization for t-SNE: # http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf import numpy as np from scipy import linalg import scipy.sparse as sp from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform from ..neighbors import BallTree from ..base import BaseEstimator from ..utils import check_array from ..utils import check_random_state from ..utils.extmath import _ravel from ..decomposition import RandomizedPCA from ..metrics.pairwise import pairwise_distances from . import _utils from . import _barnes_hut_tsne from ..utils.fixes import astype MACHINE_EPSILON = np.finfo(np.double).eps def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = astype(distances, np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = astype(distances, np.float32, copy=False) neighbors = astype(neighbors, np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) m = "All probabilities should be finite" assert np.all(np.isfinite(conditional_P)), m P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) assert np.all(np.abs(P) <= 1.0) return P def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. 
P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution n = pdist(X_embedded, "sqeuclidean") n += 1. n /= degrees_of_freedom n **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(P / Q)) # Gradient: dC/dY grad = np.ndarray((n_samples, n_components)) PQd = squareform((P - Q) * n) for i in range(skip_num_points, n_samples): np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i]) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples, n_components): """t-SNE objective function: the absolute error of the KL divergence of p_ijs and q_ijs. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. neighbors : array (n_samples, K) The neighbors is not actually required to calculate the divergence, but is here to match the signature of the gradient function degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution n = pdist(X_embedded, "sqeuclidean") n += 1. n /= degrees_of_freedom n **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) if len(P.shape) == 2: P = squareform(P) kl_divergence = 2.0 * np.dot(P, np.log(P / Q)) return kl_divergence def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. neighbors: int64 array, shape (n_samples, K) Array with element [i, j] giving the index for the jth closest neighbor to point i. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. 
n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ params = astype(params, np.float32, copy=False) X_embedded = params.reshape(n_samples, n_components) neighbors = astype(neighbors, np.int64, copy=False) if len(P.shape) == 1: sP = squareform(P).astype(np.float32) else: sP = P.astype(np.float32) grad = np.zeros(X_embedded.shape, dtype=np.float32) error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors, grad, angle, n_components, verbose, dof=degrees_of_freedom) c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad = grad.ravel() grad *= c return error, grad def _gradient_descent(objective, p0, it, n_iter, objective_error=None, n_iter_check=1, n_iter_without_progress=50, momentum=0.5, learning_rate=1000.0, min_gain=0.01, min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. objective_error : function or callable Should return a tuple of cost and gradient for a given parameter vector. n_iter_without_progress : int, optional (default: 30) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.5) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 1000.0) The learning rate should be extremely high for t-SNE! Values in the range [100.0, 1000.0] are common. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. min_error_diff : float, optional (default: 1e-7) If the absolute difference of two successive cost function values is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. 
args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """ if args is None: args = [] if kwargs is None: kwargs = {} p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = 0 for i in range(it, n_iter): new_error, grad = objective(p, *args, **kwargs) grad_norm = linalg.norm(grad) inc = update * grad >= 0.0 dec = np.invert(inc) gains[inc] += 0.05 gains[dec] *= 0.95 np.clip(gains, min_gain, np.inf) grad *= gains update = momentum * update - learning_rate * grad p += update if (i + 1) % n_iter_check == 0: if new_error is None: new_error = objective_error(p, *args) error_diff = np.abs(new_error - error) error = new_error if verbose >= 2: m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f" print(m % (i + 1, error, grad_norm)) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if grad_norm <= min_grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break if error_diff <= min_error_diff: if verbose >= 2: m = "[t-SNE] Iteration %d: error difference %f. Finished." print(m % (i + 1, error_diff)) break if new_error is not None: error = new_error return p, error, i def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i (r(i, j) - k)} where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. 
""" if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t class TSNE(BaseEstimator): """t-distributed Stochastic Neighbor Embedding. t-SNE [1] is a tool to visualize high-dimensional data. It converts similarities between data points to joint probabilities and tries to minimize the Kullback-Leibler divergence between the joint probabilities of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost function that is not convex, i.e. with different initializations we can get different results. It is highly recommended to use another dimensionality reduction method (e.g. PCA for dense data or TruncatedSVD for sparse data) to reduce the number of dimensions to a reasonable amount (e.g. 50) if the number of features is very high. This will suppress some noise and speed up the computation of pairwise distances between samples. For more tips see Laurens van der Maaten's FAQ [2]. Read more in the :ref:`User Guide <t_sne>`. Parameters ---------- n_components : int, optional (default: 2) Dimension of the embedded space. perplexity : float, optional (default: 30) The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selcting a value between 5 and 50. The choice is not extremely critical since t-SNE is quite insensitive to this parameter. early_exaggeration : float, optional (default: 4.0) Controls how tight natural clusters in the original space are in the embedded space and how much space will be between them. For larger values, the space between natural clusters will be larger in the embedded space. Again, the choice of this parameter is not very critical. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. learning_rate : float, optional (default: 1000) The learning rate can be a critical parameter. It should be between 100 and 1000. If the cost function increases during initial optimization, the early exaggeration factor or the learning rate might be too high. If the cost function gets stuck in a bad local minimum increasing the learning rate helps sometimes. n_iter : int, optional (default: 1000) Maximum number of iterations for the optimization. Should be at least 200. n_iter_without_progress : int, optional (default: 30) Maximum number of iterations without progress before we abort the optimization. min_grad_norm : float, optional (default: 1E-7) If the gradient norm is below this threshold, the optimization will be aborted. metric : string or callable, optional The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. 
Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. The default is "euclidean" which is interpreted as squared euclidean distance. init : string, optional (default: "random") Initialization of embedding. Possible options are 'random' and 'pca'. PCA initialization cannot be used with precomputed distances and is usually more globally stable than random initialization. verbose : int, optional (default: 0) Verbosity level. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Note that different initializations might result in different local minima of the cost function. method : string (default: 'barnes_hut') By default the gradient calculation algorithm uses Barnes-Hut approximation running in O(NlogN) time. method='exact' will run on the slower, but exact, algorithm in O(N^2) time. The exact algorithm should be used when nearest-neighbor errors need to be better than 3%. However, the exact method cannot scale to millions of examples. angle : float (default: 0.5) Only used if method='barnes_hut' This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. Attributes ---------- embedding_ : array-like, shape (n_samples, n_components) Stores the embedding vectors. Examples -------- >>> import numpy as np >>> from sklearn.manifold import TSNE >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = TSNE(n_components=2, random_state=0) >>> np.set_printoptions(suppress=True) >>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE array([[ 0.00017599, 0.00003993], [ 0.00009891, 0.00021913], [ 0.00018554, -0.00009357], [ 0.00009528, -0.00001407]]) References ---------- [1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008. [2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding http://homepage.tudelft.nl/19j49/t-SNE.html [3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms. Journal of Machine Learning Research 15(Oct):3221-3245, 2014. 
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf """ def __init__(self, n_components=2, perplexity=30.0, early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000, n_iter_without_progress=30, min_grad_norm=1e-7, metric="euclidean", init="random", verbose=0, random_state=None, method='barnes_hut', angle=0.5): if init not in ["pca", "random"] or isinstance(init, np.ndarray): msg = "'init' must be 'pca', 'random' or a NumPy array" raise ValueError(msg) self.n_components = n_components self.perplexity = perplexity self.early_exaggeration = early_exaggeration self.learning_rate = learning_rate self.n_iter = n_iter self.n_iter_without_progress = n_iter_without_progress self.min_grad_norm = min_grad_norm self.metric = metric self.init = init self.verbose = verbose self.random_state = random_state self.method = method self.angle = angle self.embedding_ = None def _fit(self, X, skip_num_points=0): """Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise using a dimensionality reduction technique (e.g. TrucnatedSVD). Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that this when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. """ if self.method not in ['barnes_hut', 'exact']: raise ValueError("'method' must be 'barnes_hut' or 'exact'") if self.angle < 0.0 or self.angle > 1.0: raise ValueError("'angle' must be between 0.0 - 1.0") if self.method == 'barnes_hut' and sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense ' 'data is required for method="barnes_hut". Use ' 'X.toarray() to convert to a dense numpy array if ' 'the array is small enough for it to fit in ' 'memory. Otherwise consider dimensionality ' 'reduction techniques (e.g. TruncatedSVD)') X = check_array(X, dtype=np.float32) else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is " "%f" % self.early_exaggeration) if self.n_iter < 200: raise ValueError("n_iter should be at least 200") if self.metric == "precomputed": if self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be used " "with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) if not np.all(distances >= 0): raise ValueError("All distances should be positive, either " "the metric or precomputed distances given " "as X are not correct") # Degrees of freedom of the Student's t-distribution. 
The suggestion # degrees_of_freedom = n_components - 1 comes from # "Learning a Parametric Embedding by Preserving Local Structure" # Laurens van der Maaten, 2009. degrees_of_freedom = max(self.n_components - 1.0, 1) n_samples = X.shape[0] # the number of nearest neighbors to find k = min(n_samples - 1, int(3. * self.perplexity + 1)) neighbors_nn = None if self.method == 'barnes_hut': if self.verbose: print("[t-SNE] Computing %i nearest neighbors..." % k) if self.metric == 'precomputed': # Use the precomputed distances to find # the k nearest neighbors and their distances neighbors_nn = np.argsort(distances, axis=1)[:, :k] else: # Find the nearest neighbors for every point bt = BallTree(X) # LvdM uses 3 * perplexity as the number of neighbors # And we add one to not count the data point itself # In the event that we have very small # of points # set the neighbors to n - 1 distances_nn, neighbors_nn = bt.query(X, k=k + 1) neighbors_nn = neighbors_nn[:, 1:] P = _joint_probabilities_nn(distances, neighbors_nn, self.perplexity, self.verbose) else: P = _joint_probabilities(distances, self.perplexity, self.verbose) assert np.all(np.isfinite(P)), "All probabilities should be finite" assert np.all(P >= 0), "All probabilities should be zero or positive" assert np.all(P <= 1), ("All probabilities should be less " "or then equal to one") if self.init == 'pca': pca = RandomizedPCA(n_components=self.n_components, random_state=random_state) X_embedded = pca.fit_transform(X) elif isinstance(self.init, np.ndarray): X_embedded = self.init elif self.init == 'random': X_embedded = None else: raise ValueError("Unsupported initialization scheme: %s" % self.init) return self._tsne(P, degrees_of_freedom, n_samples, random_state, X_embedded=X_embedded, neighbors=neighbors_nn, skip_num_points=skip_num_points) def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded=None, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with three stages: # * early exaggeration with momentum 0.5 # * early exaggeration with momentum 0.8 # * final optimization with momentum 0.8 # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. 
if X_embedded is None: # Initialize embedding randomly X_embedded = 1e-4 * random_state.randn(n_samples, self.n_components) params = X_embedded.ravel() opt_args = {} opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0, "learning_rate": self.learning_rate, "verbose": self.verbose, "n_iter_check": 25, "kwargs": dict(skip_num_points=skip_num_points)} if self.method == 'barnes_hut': m = "Must provide an array of neighbors to use Barnes-Hut" assert neighbors is not None, m obj_func = _kl_divergence_bh objective_error = _kl_divergence_error sP = squareform(P).astype(np.float32) neighbors = neighbors.astype(np.int64) args = [sP, neighbors, degrees_of_freedom, n_samples, self.n_components] opt_args['args'] = args opt_args['min_grad_norm'] = 1e-3 opt_args['n_iter_without_progress'] = 30 # Don't always calculate the cost since that calculation # can be nearly as expensive as the gradient opt_args['objective_error'] = objective_error opt_args['kwargs']['angle'] = self.angle opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence opt_args['args'] = [P, degrees_of_freedom, n_samples, self.n_components] opt_args['min_error_diff'] = 0.0 opt_args['min_grad_norm'] = 0.0 # Early exaggeration P *= self.early_exaggeration params, error, it = _gradient_descent(obj_func, params, **opt_args) opt_args['n_iter'] = 100 opt_args['momentum'] = 0.8 opt_args['it'] = it + 1 params, error, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] Error after %d iterations with early " "exaggeration: %f" % (it + 1, error)) # Save the final number of iterations self.n_iter_final = it # Final optimization P /= self.early_exaggeration opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 params, error, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, error)) X_embedded = params.reshape(n_samples, self.n_components) return X_embedded def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ embedding = self._fit(X) self.embedding_ = embedding return self.embedding_ def fit(self, X, y=None): """Fit X into an embedded space. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. """ self.fit_transform(X) return self def _check_fitted(self): if self.embedding_ is None: raise ValueError("Cannot call `transform` unless `fit` has" "already been called")
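# --- Usage sketch (added illustration, not part of the original module) ---
# Typical use of the TSNE estimator defined above: embed the digits data set
# into two dimensions with PCA initialization. The parameter values are common
# choices, not a recommendation taken from the original authors.
if __name__ == "__main__":
    from sklearn.datasets import load_digits

    digits = load_digits()
    tsne = TSNE(n_components=2, init="pca", random_state=0, verbose=1)
    X_2d = tsne.fit_transform(digits.data)
    print(X_2d.shape)          # (1797, 2)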
bsd-3-clause
cavestruz/L500analysis
plotting/profiles/T_evolution/Tall_evolution/plot_Tall_r500c.py
2
2944
from L500analysis.data_io.get_cluster_data import GetClusterData from L500analysis.utils.utils import aexp2redshift from L500analysis.plotting.tools.figure_formatting import * from L500analysis.plotting.profiles.tools.profiles_percentile \ import * from L500analysis.utils.constants import rbins from derived_field_functions import * color = matplotlib.cm.afmhot_r matplotlib.rcParams['legend.handlelength'] = 0 matplotlib.rcParams['legend.numpoints'] = 1 matplotlib.rcParams['legend.fontsize'] = 12 aexps = [1.0,0.9,0.8,0.7,0.6,0.5,0.45,0.4,0.35] db_name = 'L500_NR_0' db_dir = '/home/babyostrich/Documents/Repos/L500analysis/' profiles_list = ['T_mw', 'r_mid', 'vel_gas_rad_std', 'vel_gas_tan_std', 'vel_gas_rad_avg', 'vel_gas_tan_avg', 'Tnt/T500c','Ttot/T500c','T_mw/T500c', 'R/R500c'] halo_properties_list=['r500c','M_total_500c','nu_500c'] Tratio=r"$\tilde{T}=T(R)/T_{500c}$" fTz0=r"$\tilde{T}/\tilde{T}(z=1)$" pa = PlotAxes(figname='Tall_r500c', axes=[[0.15,0.4,0.80,0.55],[0.15,0.15,0.80,0.24]], axes_labels=[Tratio,fTz0], xlabel=r"$R/R_{500c}$", xlim=(0.2,5), ylims=[(0.1,1.19),(0.6,1.4)]) Tmw={} Tnt={} Ttot={} Tplots = [Tmw,Tnt,Ttot] clkeys = ['T_mw/T500c','Tnt/T500c','Ttot/T500c'] linestyles = [':','-.','-'] Tmw_frac={} Tnt_frac={} Ttot_frac={} for aexp in aexps : cldata = GetClusterData(aexp=aexp,db_name=db_name, db_dir=db_dir, profiles_list=profiles_list, halo_properties_list=halo_properties_list) for Tplot, key in zip(Tplots,clkeys) : Tplot[aexp] = calculate_profiles_mean_variance(cldata[key]) pa.axes[Tratio].plot( rbins, Tmw[aexp]['mean'],color=color(aexp),ls=':' ) pa.axes[Tratio].plot( rbins, Tnt[aexp]['mean'],color=color(aexp),ls='-.' ) pa.axes[Tratio].plot( rbins, Ttot[aexp]['mean'],color=color(aexp),ls='-', label="$z=%3.1f$" % aexp2redshift(aexp)) pa.axes[Tratio].fill_between(rbins, Ttot[0.5]['down'], Ttot[0.5]['up'], color=color(0.5), zorder=0) for aexp in aexps : for T,ls in zip(Tplots,linestyles) : fractional_evolution = get_profiles_division_mean_variance( mean_profile1=T[aexp]['mean'], var_profile1=T[aexp]['var'], mean_profile2=T[0.5]['mean'], var_profile2=T[0.5]['var'], ) pa.axes[fTz0].plot( rbins, fractional_evolution['mean'], color=color(aexp),ls=ls) pa.axes[Tratio].tick_params(labelsize=12) pa.axes[Tratio].tick_params(labelsize=12) pa.axes[fTz0].set_yticks(arange(0.6,1.4,0.2)) pa.set_legend(axes_label=Tratio,ncol=3,loc='best', frameon=False) pa.color_legend_texts(axes_label=Tratio) pa.savefig()
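# --- Added illustration (not part of L500analysis) ---
# calculate_profiles_mean_variance reduces a stack of per-halo radial profiles
# to a mean curve plus a scatter band ('up'/'down'), which is what gets drawn
# and shaded above. A generic numpy sketch of that reduction with fake data;
# the helper name and the 1-sigma band are assumptions, not the package's code.
import numpy as _np


def _profiles_mean_variance_sketch(profiles):
    profiles = _np.asarray(profiles, dtype=float)
    mean = profiles.mean(axis=0)
    std = profiles.std(axis=0)
    return {"mean": mean, "var": std ** 2, "up": mean + std, "down": mean - std}


_fake = 1.0 + 0.1 * _np.random.RandomState(0).standard_normal((50, 30))
_stats = _profiles_mean_variance_sketch(_fake)   # 50 halos, 30 radial bins
print(_stats["mean"].shape)                      # (30,)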
mit
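The plotting script above leans on project-specific helpers (calculate_profiles_mean_variance, get_profiles_division_mean_variance) that are not shown. A hedged NumPy sketch of what such helpers plausibly compute; the function names, toy profiles, and first-order error propagation here are assumptions, not the project's actual implementation.

import numpy as np

def profiles_mean_variance(profiles):
    """Hypothetical stand-in for calculate_profiles_mean_variance:
    stack per-halo radial profiles and return bin-wise statistics."""
    arr = np.asarray(profiles, dtype=float)          # shape (n_halos, n_rbins)
    mean = np.nanmean(arr, axis=0)
    var = np.nanvar(arr, axis=0)
    std = np.sqrt(var)
    return {'mean': mean, 'var': var, 'up': mean + std, 'down': mean - std}

def profiles_ratio(mean1, var1, mean2, var2):
    """Hypothetical stand-in for get_profiles_division_mean_variance:
    ratio of two mean profiles with first-order error propagation."""
    mean1, var1 = np.asarray(mean1), np.asarray(var1)
    mean2, var2 = np.asarray(mean2), np.asarray(var2)
    ratio = mean1 / mean2
    var = ratio ** 2 * (var1 / mean1 ** 2 + var2 / mean2 ** 2)
    return {'mean': ratio, 'var': var}

# Toy example: 50 halos, 10 radial bins, two epochs.
rng = np.random.RandomState(0)
p_ref = profiles_mean_variance(1.0 + 0.1 * rng.randn(50, 10))
p_new = profiles_mean_variance(0.9 + 0.1 * rng.randn(50, 10))
evolution = profiles_ratio(p_new['mean'], p_new['var'],
                           p_ref['mean'], p_ref['var'])
print(evolution['mean'].shape)   # (10,)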
Edu-Glez/Bank_sentiment_analysis
env/lib/python3.6/site-packages/pandas/io/sas/sasreader.py
7
2128
""" Read SAS sas7bdat or xport files. """ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): """ Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None If None, file format is inferred. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. encoding : string, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. Returns ------- DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader """ if format is None: try: fname = filepath_or_buffer.lower() if fname.endswith(".xpt"): format = "xport" elif fname.endswith(".sas7bdat"): format = "sas7bdat" else: raise ValueError("unable to infer format of SAS file") except: pass if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) elif format.lower() == 'sas7bdat': from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) else: raise ValueError('unknown SAS format') if iterator or chunksize: return reader data = reader.read() reader.close() return data
apache-2.0
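For reference, the public entry point wrapped by this module is pandas.read_sas. A minimal usage sketch assuming a recent pandas; 'example.sas7bdat' is a placeholder file name.

import pandas as pd

# The format is inferred from the extension, exactly as in the function above.
df = pd.read_sas('example.sas7bdat', encoding='latin-1')
print(df.shape)

# With chunksize, read_sas returns the reader itself so the file can be
# consumed incrementally instead of being loaded in one piece.
reader = pd.read_sas('example.sas7bdat', chunksize=10000)
for chunk in reader:
    print(len(chunk))
reader.close()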
WhittKinley/aima-python
submissions/aartiste/myKMeans.py
13
4852
from sklearn.cluster import KMeans import traceback from submissions.aartiste import election from submissions.aartiste import county_demographics class DataFrame: data = [] feature_names = [] target = [] target_names = [] trumpECHP = DataFrame() ''' Extract data from the CORGIS elections, and merge it with the CORGIS demographics. Both data sets are organized by county and state. ''' joint = {} elections = election.get_results() for county in elections: try: st = county['Location']['State Abbreviation'] countyST = county['Location']['County'] + st trump = county['Vote Data']['Donald Trump']['Percent of Votes'] joint[countyST] = {} joint[countyST]['ST']= st joint[countyST]['Trump'] = trump except: traceback.print_exc() demographics = county_demographics.get_all_counties() for county in demographics: try: countyNames = county['County'].split() cName = ' '.join(countyNames[:-1]) st = county['State'] countyST = cName + st # elderly = # college = # home = # poverty = if countyST in joint: joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"] joint[countyST]['HighSchool'] = county['Education']["High School or Higher"] joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"] joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"] joint[countyST]['Persons'] = county['Housing']["Persons per Household"] joint[countyST]['Home'] = county['Housing']["Homeownership Rate"] joint[countyST]['Income'] = county['Income']["Median Houseold Income"] joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"] joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"] except: traceback.print_exc() ''' Remove the counties that did not appear in both samples. ''' intersection = {} for countyST in joint: if 'College' in joint[countyST]: intersection[countyST] = joint[countyST] trumpECHP.data = [] ''' Build the input frame, row by row. ''' for countyST in intersection: # choose the input values row = [] for key in intersection[countyST]: if key in ['ST', 'Trump']: continue row.append(intersection[countyST][key]) trumpECHP.data.append(row) firstCounty = next(iter(intersection.keys())) firstRow = intersection[firstCounty] trumpECHP.feature_names = list(firstRow.keys()) trumpECHP.feature_names.remove('ST') trumpECHP.feature_names.remove('Trump') ''' Build the target list, one entry for each row in the input frame. The Naive Bayesian network is a classifier, i.e. it sorts data points into bins. The best it can do to estimate a continuous variable is to break the domain into segments, and predict the segment into which the variable's value will fall. In this example, I'm breaking Trump's % into two arbitrary segments. ''' trumpECHP.target = [] def trumpTarget(percentage): if percentage > 45: return 1 return 0 for countyST in intersection: # choose the target tt = trumpTarget(intersection[countyST]['Trump']) trumpECHP.target.append(tt) trumpECHP.target_names = [ 'Trump <= 45%', 'Trump > 45%', ] ''' Try scaling the data. 
''' trumpScaled = DataFrame() def setupScales(grid): global min, max min = list(grid[0]) max = list(grid[0]) for row in range(1, len(grid)): for col in range(len(grid[row])): cell = grid[row][col] if cell < min[col]: min[col] = cell if cell > max[col]: max[col] = cell def scaleGrid(grid): newGrid = [] for row in range(len(grid)): newRow = [] for col in range(len(grid[row])): try: cell = grid[row][col] scaled = (cell - min[col]) \ / (max[col] - min[col]) newRow.append(scaled) except: pass newGrid.append(newRow) return newGrid setupScales(trumpECHP.data) trumpScaled.data = scaleGrid(trumpECHP.data) trumpScaled.feature_names = trumpECHP.feature_names trumpScaled.target = trumpECHP.target trumpScaled.target_names = trumpECHP.target_names ''' Make a customn classifier, ''' km = KMeans( n_clusters=2, # max_iter=300, # n_init=10, # init='k-means++', # algorithm='auto', # precompute_distances='auto', # tol=1e-4, # n_jobs=-1, # random_state=numpy.RandomState, # verbose=0, # copy_x=True, ) Examples = { 'Trump': { 'frame': trumpScaled, }, 'TrumpCustom': { 'frame': trumpScaled, 'kmeans': km }, }
mit
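The script above rescales features by hand (setupScales/scaleGrid) before clustering with KMeans. A compact sketch of the same pipeline using scikit-learn's MinMaxScaler; the feature matrix below is invented toy data, not the county data.

import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler

# Toy stand-in for the county feature matrix (rows = counties,
# columns = demographic features on very different scales).
rng = np.random.RandomState(0)
data = rng.rand(100, 8) * [100, 100, 100, 100, 5, 100, 80000, 50]

# MinMaxScaler applies per-column (x - min) / (max - min), the same
# rescaling as the hand-written setupScales/scaleGrid helpers.
scaled = MinMaxScaler().fit_transform(data)

km = KMeans(n_clusters=2, random_state=0)
labels = km.fit_predict(scaled)
print(np.bincount(labels))   # cluster sizes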
frozstone/concept
Cluster.py
1
1813
import community import networkx as nx import matplotlib.pyplot as plt from collections import OrderedDict from lxml import etree class Cluster: def __print_cluster(self, partition, math_map, desc_map): n_cluster = max(partition.values()) clusters = OrderedDict.fromkeys(range(n_cluster+1)) for node, cl in partition.iteritems(): if clusters[cl] is None: clusters[cl] = [] clusters[cl].append(node) mid_prefix = math_map.items()[0][0] mid_prefix = mid_prefix[:mid_prefix.rindex("_")] for ckey, cnodes in clusters.iteritems(): print ckey for node in cnodes: if node == 0: continue gmid = "%s_%d" % (mid_prefix, node) mml = math_map[gmid] etree.strip_tags(mml, "*") desc = desc_map[gmid] if gmid in desc_map else "" print mml.text.encode("utf-8"), desc print "" def comm_detect(self, labels, edges, math_map, desc_map): g = nx.from_numpy_matrix(edges) partition = community.best_partition(g) self.__print_cluster(partition, math_map, desc_map) partresult = partition.values() pos = nx.spring_layout(g) nx.draw_networkx_nodes(g, pos, node_color = partition.values()) nx.draw_networkx_labels(g, pos, labels, font_size=16) nx.draw_networkx_edges(g, pos) plt.show() def comm_detect_g(self, g): partition = community.best_partition(g) partresult = partition.values() pos = nx.spring_layout(g) nx.draw_networkx_nodes(g, pos, node_color = partition.values()) #nx.draw_networkx_labels(g, pos, labels, font_size=16) nx.draw_networkx_edges(g, pos) plt.show()
mit
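The class above wraps python-louvain's best_partition plus MathML-specific printing. A stripped-down sketch of the community-detection step alone, assuming the python-louvain and networkx packages; the karate-club graph stands in for the formula co-occurrence graph.

import community          # python-louvain, same import as the class above
import networkx as nx

# Zachary's karate club as a small example graph.
g = nx.karate_club_graph()

# best_partition returns {node: community_id}; the class above groups the
# node ids by that value before printing each cluster.
partition = community.best_partition(g)
n_clusters = max(partition.values()) + 1
clusters = {c: [n for n, ci in partition.items() if ci == c]
            for c in range(n_clusters)}
for c, nodes in clusters.items():
    print(c, nodes)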
wlamond/scikit-learn
sklearn/base.py
15
19725
"""Base classes for all estimators.""" # Author: Gael Varoquaux <[email protected]> # License: BSD 3 clause import copy import warnings import numpy as np from scipy import sparse from .externals import six from .utils.fixes import signature from . import __version__ ############################################################################## def _first_and_last_element(arr): """Returns first and last element of numpy array or sparse matrix.""" if isinstance(arr, np.ndarray) or hasattr(arr, 'data'): # numpy array or sparse matrix with .data attribute data = arr.data if sparse.issparse(arr) else arr return data.flat[0], data.flat[-1] else: # Sparse matrices without .data attribute. Only dok_matrix at # the time of writing, in this case indexing is fast return arr[0, 0], arr[-1, -1] def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deep copy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in six.iteritems(new_object_params): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is param2: # this should always happen continue if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False elif (param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and param2.ndim > 0 and param2.shape[0] > 0): equality_test = ( param1.shape == param2.shape and param1.dtype == param2.dtype and (_first_and_last_element(param1) == _first_and_last_element(param2)) ) else: equality_test = np.all(param1 == param2) elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False elif param1.size == 0 or param2.size == 0: equality_test = ( param1.__class__ == param2.__class__ and param1.size == 0 and param2.size == 0 ) else: equality_test = ( param1.__class__ == param2.__class__ and (_first_and_last_element(param1) == _first_and_last_element(param2)) and param1.nnz == param2.nnz and param1.shape == param2.shape ) else: # fall back on standard equality equality_test = param1 == param2 if equality_test: warnings.warn("Estimator %s modifies parameters in __init__." " This behavior is deprecated as of 0.18 and " "support for this behavior will be removed in 0.20." 
% type(estimator).__name__, DeprecationWarning) else: raise RuntimeError('Cannot clone object %s, as the constructor ' 'does not seem to set parameter %s' % (estimator, name)) return new_object ############################################################################### def _pprint(params, offset=0, printer=repr): """Pretty print the dictionary 'params' Parameters ---------- params : dict The dictionary to pretty print offset : int The offset in characters to add at the begin of each line. printer : callable The function to convert entries to strings, typically the builtin str or repr """ # Do a multi-line justified repr: options = np.get_printoptions() np.set_printoptions(precision=5, threshold=64, edgeitems=2) params_list = list() this_line_length = offset line_sep = ',\n' + (1 + offset // 2) * ' ' for i, (k, v) in enumerate(sorted(six.iteritems(params))): if type(v) is float: # use str for representing floating point numbers # this way we get consistent representation across # architectures and versions. this_repr = '%s=%s' % (k, str(v)) else: # use repr of the rest this_repr = '%s=%s' % (k, printer(v)) if len(this_repr) > 500: this_repr = this_repr[:300] + '...' + this_repr[-100:] if i > 0: if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr): params_list.append(line_sep) this_line_length = len(line_sep) else: params_list.append(', ') this_line_length += 2 params_list.append(this_repr) this_line_length += len(this_repr) np.set_printoptions(**options) lines = ''.join(params_list) # Strip trailing space to avoid nightmare in doctests lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n')) return lines ############################################################################### class BaseEstimator(object): """Base class for all estimators in scikit-learn Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). """ @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, 'deprecated_original', cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = signature(init) # Consider the constructor parameters excluding 'self' parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError("scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature)) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): # We need deprecation warnings to always be on in order to # catch deprecated param values. # This is set in utils/__init__.py but it gets overwritten # when running under python3 somehow. 
warnings.simplefilter("always", DeprecationWarning) try: with warnings.catch_warnings(record=True) as w: value = getattr(self, key, None) if len(w) and w[0].category == DeprecationWarning: # if the parameter is deprecated, don't show it continue finally: warnings.filters.pop(0) # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (key, self.__class__.__name__)) setattr(self, key, value) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False), offset=len(class_name),),) def __getstate__(self): try: state = super(BaseEstimator, self).__getstate__() except AttributeError: state = self.__dict__.copy() if type(self).__module__.startswith('sklearn.'): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith('sklearn.'): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( "Trying to unpickle estimator {0} from version {1} when " "using version {2}. This might lead to breaking code or " "invalid results. Use at your own risk.".format( self.__class__.__name__, pickle_version, __version__), UserWarning) try: super(BaseEstimator, self).__setstate__(state) except AttributeError: self.__dict__.update(state) ############################################################################### class ClassifierMixin(object): """Mixin class for all classifiers in scikit-learn.""" _estimator_type = "classifier" def score(self, X, y, sample_weight=None): """Returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True labels for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) wrt. y. 
""" from .metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) ############################################################################### class RegressorMixin(object): """Mixin class for all regression estimators in scikit-learn.""" _estimator_type = "regressor" def score(self, X, y, sample_weight=None): """Returns the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as (1 - u/v), where u is the residual sum of squares ((y_true - y_pred) ** 2).sum() and v is the total sum of squares ((y_true - y_true.mean()) ** 2).sum(). The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : array-like, shape = (n_samples, n_features) Test samples. y : array-like, shape = (n_samples) or (n_samples, n_outputs) True values for X. sample_weight : array-like, shape = [n_samples], optional Sample weights. Returns ------- score : float R^2 of self.predict(X) wrt. y. """ from .metrics import r2_score return r2_score(y, self.predict(X), sample_weight=sample_weight, multioutput='variance_weighted') ############################################################################### class ClusterMixin(object): """Mixin class for all cluster estimators in scikit-learn.""" _estimator_type = "clusterer" def fit_predict(self, X, y=None): """Performs clustering on X and returns cluster labels. Parameters ---------- X : ndarray, shape (n_samples, n_features) Input data. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X) return self.labels_ class BiclusterMixin(object): """Mixin class for all bicluster estimators in scikit-learn""" @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. """ return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the i'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Returns ------- row_ind : np.array, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : np.array, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the i'th bicluster. Returns ------- shape : (int, int) Number of rows and columns (resp.) in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Returns the submatrix corresponding to bicluster `i`. Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. """ from .utils.validation import check_array data = check_array(data, accept_sparse='csr') row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] ############################################################################### class TransformerMixin(object): """Mixin class for all transformers in scikit-learn.""" def fit_transform(self, X, y=None, **fit_params): """Fit to data, then transform it. Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X. 
Parameters ---------- X : numpy array of shape [n_samples, n_features] Training set. y : numpy array of shape [n_samples] Target values. Returns ------- X_new : numpy array of shape [n_samples, n_features_new] Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm if y is None: # fit method of arity 1 (unsupervised transformation) return self.fit(X, **fit_params).transform(X) else: # fit method of arity 2 (supervised transformation) return self.fit(X, y, **fit_params).transform(X) class DensityMixin(object): """Mixin class for all density estimators in scikit-learn.""" _estimator_type = "DensityEstimator" def score(self, X, y=None): """Returns the score of the model on the data X Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- score : float """ pass ############################################################################### class MetaEstimatorMixin(object): """Mixin class for all meta estimators in scikit-learn.""" # this is just a tag for the moment ############################################################################### def is_classifier(estimator): """Returns True if the given estimator is (probably) a classifier.""" return getattr(estimator, "_estimator_type", None) == "classifier" def is_regressor(estimator): """Returns True if the given estimator is (probably) a regressor.""" return getattr(estimator, "_estimator_type", None) == "regressor"
bsd-3-clause
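A short usage sketch of the BaseEstimator machinery defined above (get_params, set_params, clone), using LogisticRegression purely as an example estimator.

from sklearn.base import clone
from sklearn.linear_model import LogisticRegression

est = LogisticRegression(C=0.5)

# get_params reads the constructor arguments collected by _get_param_names.
print(est.get_params()['C'])            # 0.5

# set_params also accepts the nested '<component>__<parameter>' syntax
# for meta-estimators such as pipelines.
est.set_params(C=2.0)

# clone rebuilds an unfitted estimator with the same constructor parameters.
est_copy = clone(est)
print(est_copy.get_params()['C'])       # 2.0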
rajul/mne-python
mne/stats/tests/test_cluster_level.py
8
20475
import os import os.path as op import numpy as np from numpy.testing import (assert_equal, assert_array_equal, assert_array_almost_equal) from nose.tools import assert_true, assert_raises from scipy import sparse, linalg, stats from mne.fixes import partial import warnings from mne.parallel import _force_serial from mne.stats.cluster_level import (permutation_cluster_test, permutation_cluster_1samp_test, spatio_temporal_cluster_test, spatio_temporal_cluster_1samp_test, ttest_1samp_no_p, summarize_clusters_stc) from mne.utils import run_tests_if_main, slow_test, _TempDir, set_log_file warnings.simplefilter('always') # enable b/c these tests throw warnings n_space = 50 def _get_conditions(): noise_level = 20 n_time_1 = 20 n_time_2 = 13 normfactor = np.hanning(20).sum() rng = np.random.RandomState(42) condition1_1d = rng.randn(n_time_1, n_space) * noise_level for c in condition1_1d: c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor condition2_1d = rng.randn(n_time_2, n_space) * noise_level for c in condition2_1d: c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor pseudoekp = 10 * np.hanning(25)[None, :] condition1_1d[:, 25:] += pseudoekp condition2_1d[:, 25:] -= pseudoekp condition1_2d = condition1_1d[:, :, np.newaxis] condition2_2d = condition2_1d[:, :, np.newaxis] return condition1_1d, condition2_1d, condition1_2d, condition2_2d def test_cache_dir(): """Test use of cache dir """ tempdir = _TempDir() orig_dir = os.getenv('MNE_CACHE_DIR', None) orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None) rng = np.random.RandomState(0) X = rng.randn(9, 2, 10) log_file = op.join(tempdir, 'log.txt') try: os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K' os.environ['MNE_CACHE_DIR'] = tempdir # Fix error for #1507: in-place when memmapping permutation_cluster_1samp_test( X, buffer_size=None, n_jobs=2, n_permutations=1, seed=0, stat_fun=ttest_1samp_no_p, verbose=False) # ensure that non-independence yields warning stat_fun = partial(ttest_1samp_no_p, sigma=1e-3) set_log_file(log_file) permutation_cluster_1samp_test( X, buffer_size=10, n_jobs=2, n_permutations=1, seed=0, stat_fun=stat_fun, verbose=False) with open(log_file, 'r') as fid: assert_true('independently' in ''.join(fid.readlines())) finally: if orig_dir is not None: os.environ['MNE_CACHE_DIR'] = orig_dir else: del os.environ['MNE_CACHE_DIR'] if orig_size is not None: os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size else: del os.environ['MNE_MEMMAP_MIN_SIZE'] set_log_file(None) def test_permutation_step_down_p(): """Test cluster level permutations with step_down_p """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph # noqa except ImportError: return rng = np.random.RandomState(0) # subjects, time points, spatial points X = rng.randn(9, 2, 10) # add some significant points X[:, 0:2, 0:2] += 2 # span two time points and two spatial points X[:, 1, 5:9] += 0.5 # span four time points with 4x smaller amplitude thresh = 2 # make sure it works when we use ALL points in step-down t, clusters, p, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=1.0) # make sure using step-down will actually yield improvements sometimes t, clusters, p_old, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.0) assert_equal(np.sum(p_old < 0.05), 1) # just spatial cluster t, clusters, p_new, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.05) assert_equal(np.sum(p_new < 0.05), 2) # time one 
rescued assert_true(np.all(p_old >= p_new)) def test_cluster_permutation_test(): """Test cluster level permutations tests """ condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() for condition1, condition2 in zip((condition1_1d, condition1_2d), (condition2_1d, condition2_2d)): T_obs, clusters, cluster_p_values, hist = permutation_cluster_test( [condition1, condition2], n_permutations=100, tail=1, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 0.05), 1) T_obs, clusters, cluster_p_values, hist = permutation_cluster_test( [condition1, condition2], n_permutations=100, tail=0, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 0.05), 1) # test with 2 jobs and buffer_size enabled buffer_size = condition1.shape[1] // 10 T_obs, clusters, cluster_p_values_buff, hist =\ permutation_cluster_test([condition1, condition2], n_permutations=100, tail=0, seed=1, n_jobs=2, buffer_size=buffer_size) assert_array_equal(cluster_p_values, cluster_p_values_buff) @slow_test def test_cluster_permutation_t_test(): """Test cluster level permutations T-test """ condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() # use a very large sigma to make sure Ts are not independent stat_funs = [ttest_1samp_no_p, partial(ttest_1samp_no_p, sigma=1e-1)] for stat_fun in stat_funs: for condition1 in (condition1_1d, condition1_2d): # these are so significant we can get away with fewer perms T_obs, clusters, cluster_p_values, hist =\ permutation_cluster_1samp_test(condition1, n_permutations=100, tail=0, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 0.05), 1) T_obs_pos, c_1, cluster_p_values_pos, _ =\ permutation_cluster_1samp_test(condition1, n_permutations=100, tail=1, threshold=1.67, seed=1, stat_fun=stat_fun, buffer_size=None) T_obs_neg, _, cluster_p_values_neg, _ =\ permutation_cluster_1samp_test(-condition1, n_permutations=100, tail=-1, threshold=-1.67, seed=1, stat_fun=stat_fun, buffer_size=None) assert_array_equal(T_obs_pos, -T_obs_neg) assert_array_equal(cluster_p_values_pos < 0.05, cluster_p_values_neg < 0.05) # test with 2 jobs and buffer_size enabled buffer_size = condition1.shape[1] // 10 T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \ permutation_cluster_1samp_test(-condition1, n_permutations=100, tail=-1, threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun, buffer_size=buffer_size) assert_array_equal(T_obs_neg, T_obs_neg_buff) assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff) def test_cluster_permutation_with_connectivity(): """Test cluster level permutations with connectivity matrix """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() n_pts = condition1_1d.shape[1] # we don't care about p-values in any of these, so do fewer permutations args = dict(seed=None, max_step=1, exclude=None, step_down_p=0, t_power=1, threshold=1.67, check_disjoint=False, n_permutations=50) did_warn = False for X1d, X2d, func, spatio_temporal_func in \ [(condition1_1d, condition1_2d, permutation_cluster_1samp_test, spatio_temporal_cluster_1samp_test), ([condition1_1d, condition2_1d], [condition1_2d, condition2_2d], permutation_cluster_test, spatio_temporal_cluster_test)]: out = func(X1d, **args) connectivity = grid_to_graph(1, n_pts) out_connectivity = func(X1d, connectivity=connectivity, **args) 
assert_array_equal(out[0], out_connectivity[0]) for a, b in zip(out_connectivity[1], out[1]): assert_array_equal(out[0][a], out[0][b]) assert_true(np.all(a[b])) # test spatio-temporal w/o time connectivity (repeat spatial pattern) connectivity_2 = sparse.coo_matrix( linalg.block_diag(connectivity.asfptype().todense(), connectivity.asfptype().todense())) if isinstance(X1d, list): X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d] else: X1d_2 = np.concatenate((X1d, X1d), axis=1) out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args) # make sure we were operating on the same values split = len(out[0]) assert_array_equal(out[0], out_connectivity_2[0][:split]) assert_array_equal(out[0], out_connectivity_2[0][split:]) # make sure we really got 2x the number of original clusters n_clust_orig = len(out[1]) assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig) # Make sure that we got the old ones back data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]]) data_2 = set([np.sum(out_connectivity_2[0][a[:n_pts]]) for a in out_connectivity_2[1][:]]) assert_true(len(data_1.intersection(data_2)) == len(data_1)) # now use the other algorithm if isinstance(X1d, list): X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2] else: X1d_3 = np.reshape(X1d_2, (-1, 2, n_space)) out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=0, threshold=1.67, check_disjoint=True) # make sure we were operating on the same values split = len(out[0]) assert_array_equal(out[0], out_connectivity_3[0][0]) assert_array_equal(out[0], out_connectivity_3[0][1]) # make sure we really got 2x the number of original clusters assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig) # Make sure that we got the old ones back data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]]) data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in out_connectivity_3[1]]) assert_true(len(data_1.intersection(data_2)) == len(data_1)) # test new versus old method out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=2, threshold=1.67) out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=1, threshold=1.67) # clusters could be in a different order sums_4 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_4[1]] sums_5 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_5[1]] sums_4 = np.sort(sums_4) sums_5 = np.sort(sums_5) assert_array_almost_equal(sums_4, sums_5) if not _force_serial: assert_raises(ValueError, spatio_temporal_func, X1d_3, n_permutations=1, connectivity=connectivity, max_step=1, threshold=1.67, n_jobs=-1000) # not enough TFCE params assert_raises(KeyError, spatio_temporal_func, X1d_3, connectivity=connectivity, threshold=dict(me='hello')) # too extreme a start threshold with warnings.catch_warnings(record=True) as w: spatio_temporal_func(X1d_3, connectivity=connectivity, threshold=dict(start=10, step=1)) if not did_warn: assert_true(len(w) == 1) did_warn = True # too extreme a start threshold assert_raises(ValueError, spatio_temporal_func, X1d_3, connectivity=connectivity, tail=-1, threshold=dict(start=1, step=-1)) assert_raises(ValueError, spatio_temporal_func, X1d_3, connectivity=connectivity, tail=-1, threshold=dict(start=-1, step=1)) # wrong type for threshold assert_raises(TypeError, spatio_temporal_func, X1d_3, connectivity=connectivity, threshold=[]) # wrong value for tail assert_raises(ValueError, spatio_temporal_func, 
X1d_3, connectivity=connectivity, tail=2) # make sure it actually found a significant point out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=1, threshold=dict(start=1, step=1)) assert_true(np.min(out_connectivity_6[2]) < 0.05) @slow_test def test_permutation_connectivity_equiv(): """Test cluster level permutations with and without connectivity """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return rng = np.random.RandomState(0) # subjects, time points, spatial points n_time = 2 n_space = 4 X = rng.randn(6, n_time, n_space) # add some significant points X[:, :, 0:2] += 10 # span two time points and two spatial points X[:, 1, 3] += 20 # span one time point max_steps = [1, 1, 1, 2] # This will run full algorithm in two ways, then the ST-algorithm in 2 ways # All of these should give the same results conns = [None, grid_to_graph(n_time, n_space), grid_to_graph(1, n_space), grid_to_graph(1, n_space)] stat_map = None thresholds = [2, dict(start=1.5, step=1.0)] sig_counts = [2, 5] sdps = [0, 0.05, 0.05] ots = ['mask', 'mask', 'indices'] stat_fun = partial(ttest_1samp_no_p, sigma=1e-3) for thresh, count in zip(thresholds, sig_counts): cs = None ps = None for max_step, conn in zip(max_steps, conns): for sdp, ot in zip(sdps, ots): t, clusters, p, H0 = \ permutation_cluster_1samp_test( X, threshold=thresh, connectivity=conn, n_jobs=2, max_step=max_step, stat_fun=stat_fun, step_down_p=sdp, out_type=ot) # make sure our output datatype is correct if ot == 'mask': assert_true(isinstance(clusters[0], np.ndarray)) assert_true(clusters[0].dtype == bool) assert_array_equal(clusters[0].shape, X.shape[1:]) else: # ot == 'indices' assert_true(isinstance(clusters[0], tuple)) # make sure all comparisons were done; for TFCE, no perm # should come up empty if count == 8: assert_true(not np.any(H0 == 0)) inds = np.where(p < 0.05)[0] assert_true(len(inds) == count) this_cs = [clusters[ii] for ii in inds] this_ps = p[inds] this_stat_map = np.zeros((n_time, n_space), dtype=bool) for ci, c in enumerate(this_cs): if isinstance(c, tuple): this_c = np.zeros((n_time, n_space), bool) for x, y in zip(c[0], c[1]): this_stat_map[x, y] = True this_c[x, y] = True this_cs[ci] = this_c c = this_c this_stat_map[c] = True if cs is None: ps = this_ps cs = this_cs if stat_map is None: stat_map = this_stat_map assert_array_equal(ps, this_ps) assert_true(len(cs) == len(this_cs)) for c1, c2 in zip(cs, this_cs): assert_array_equal(c1, c2) assert_array_equal(stat_map, this_stat_map) @slow_test def spatio_temporal_cluster_test_connectivity(): """Test spatio-temporal cluster permutations """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() rng = np.random.RandomState(0) noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10) data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1]) noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10) data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1]) conn = grid_to_graph(data1_2d.shape[-1], 1) threshold = dict(start=4.0, step=2) T_obs, clusters, p_values_conn, hist = \ spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn, 
n_permutations=50, tail=1, seed=1, threshold=threshold, buffer_size=None) buffer_size = data1_2d.size // 10 T_obs, clusters, p_values_no_conn, hist = \ spatio_temporal_cluster_test([data1_2d, data2_2d], n_permutations=50, tail=1, seed=1, threshold=threshold, n_jobs=2, buffer_size=buffer_size) assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05)) # make sure results are the same without buffer_size T_obs, clusters, p_values2, hist2 = \ spatio_temporal_cluster_test([data1_2d, data2_2d], n_permutations=50, tail=1, seed=1, threshold=threshold, n_jobs=2, buffer_size=None) assert_array_equal(p_values_no_conn, p_values2) def ttest_1samp(X): """Returns T-values """ return stats.ttest_1samp(X, 0)[0] def test_summarize_clusters(): """Test cluster summary stcs """ clu = (np.random.random([1, 20484]), [(np.array([0]), np.array([0, 2, 4]))], np.array([0.02, 0.1]), np.array([12, -14, 30])) stc_sum = summarize_clusters_stc(clu) assert_true(stc_sum.data.shape[1] == 2) clu[2][0] = 0.3 assert_raises(RuntimeError, summarize_clusters_stc, clu) run_tests_if_main()
bsd-3-clause
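A minimal sketch of the function these tests exercise, mne.stats.permutation_cluster_1samp_test, on random data shaped like the test fixtures; it assumes an MNE-Python install, and the threshold and permutation count are illustrative.

import numpy as np
from mne.stats import permutation_cluster_1samp_test

# (subjects x times x locations) data with an injected effect, mirroring
# the fixtures used by the tests above.
rng = np.random.RandomState(0)
X = rng.randn(9, 2, 10)
X[:, 0:2, 0:2] += 2          # a small cluster of significant points

T_obs, clusters, cluster_p_values, H0 = permutation_cluster_1samp_test(
    X, threshold=2.0, n_permutations=100, seed=0)
print([p for p in cluster_p_values if p < 0.05])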
sarahgrogan/scikit-learn
sklearn/metrics/setup.py
299
1024
import os
import os.path

import numpy
from numpy.distutils.misc_util import Configuration

from sklearn._build_utils import get_blas_info


def configuration(parent_package="", top_path=None):
    config = Configuration("metrics", parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    return config


if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
bsd-3-clause
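For context, a per-subpackage setup.py like the one above is normally driven by a parent configuration via add_subpackage. A sketch of that legacy numpy.distutils pattern; the parent package name is illustrative, and numpy.distutils itself is deprecated on recent Python versions.

from numpy.distutils.misc_util import Configuration


def configuration(parent_package='', top_path=None):
    config = Configuration('sklearn', parent_package, top_path)
    config.add_subpackage('metrics')     # runs sklearn/metrics/setup.py
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
    # typically invoked as:  python setup.py build_ext --inplace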
scottpurdy/nupic
examples/opf/clients/hotgym/anomaly/one_gym/nupic_anomaly_output.py
49
9450
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Provides two classes with the same signature for writing data out of NuPIC models. (This is a component of the One Hot Gym Anomaly Tutorial.) """ import csv from collections import deque from abc import ABCMeta, abstractmethod from nupic.algorithms import anomaly_likelihood # Try to import matplotlib, but we don't have to. try: import matplotlib matplotlib.use('TKAgg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.dates import date2num, DateFormatter except ImportError: pass WINDOW = 300 HIGHLIGHT_ALPHA = 0.3 ANOMALY_HIGHLIGHT_COLOR = 'red' WEEKEND_HIGHLIGHT_COLOR = 'yellow' ANOMALY_THRESHOLD = 0.9 class NuPICOutput(object): __metaclass__ = ABCMeta def __init__(self, name): self.name = name self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood() @abstractmethod def write(self, timestamp, value, predicted, anomalyScore): pass @abstractmethod def close(self): pass class NuPICFileOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICFileOutput, self).__init__(*args, **kwargs) self.outputFiles = [] self.outputWriters = [] self.lineCount = 0 headerRow = [ 'timestamp', 'kw_energy_consumption', 'prediction', 'anomaly_score', 'anomaly_likelihood' ] outputFileName = "%s_out.csv" % self.name print "Preparing to output %s data to %s" % (self.name, outputFileName) self.outputFile = open(outputFileName, "w") self.outputWriter = csv.writer(self.outputFile) self.outputWriter.writerow(headerRow) def write(self, timestamp, value, predicted, anomalyScore): if timestamp is not None: anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability( value, anomalyScore, timestamp ) outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood] self.outputWriter.writerow(outputRow) self.lineCount += 1 def close(self): self.outputFile.close() print "Done. Wrote %i data lines to %s." 
% (self.lineCount, self.name) def extractWeekendHighlights(dates): weekendsOut = [] weekendSearch = [5, 6] weekendStart = None for i, date in enumerate(dates): if date.weekday() in weekendSearch: if weekendStart is None: # Mark start of weekend weekendStart = i else: if weekendStart is not None: # Mark end of weekend weekendsOut.append(( weekendStart, i, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) weekendStart = None # Cap it off if we're still in the middle of a weekend if weekendStart is not None: weekendsOut.append(( weekendStart, len(dates)-1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) return weekendsOut def extractAnomalyIndices(anomalyLikelihood): anomaliesOut = [] anomalyStart = None for i, likelihood in enumerate(anomalyLikelihood): if likelihood >= ANOMALY_THRESHOLD: if anomalyStart is None: # Mark start of anomaly anomalyStart = i else: if anomalyStart is not None: # Mark end of anomaly anomaliesOut.append(( anomalyStart, i, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) anomalyStart = None # Cap it off if we're still in the middle of an anomaly if anomalyStart is not None: anomaliesOut.append(( anomalyStart, len(anomalyLikelihood)-1, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) return anomaliesOut class NuPICPlotOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICPlotOutput, self).__init__(*args, **kwargs) # Turn matplotlib interactive mode on. plt.ion() self.dates = [] self.convertedDates = [] self.value = [] self.allValues = [] self.predicted = [] self.anomalyScore = [] self.anomalyLikelihood = [] self.actualLine = None self.predictedLine = None self.anomalyScoreLine = None self.anomalyLikelihoodLine = None self.linesInitialized = False self._chartHighlights = [] fig = plt.figure(figsize=(16, 10)) gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) self._mainGraph = fig.add_subplot(gs[0, 0]) plt.title(self.name) plt.ylabel('KW Energy Consumption') plt.xlabel('Date') self._anomalyGraph = fig.add_subplot(gs[1]) plt.ylabel('Percentage') plt.xlabel('Date') # Maximizes window mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.tight_layout() def initializeLines(self, timestamp): print "initializing %s" % self.name anomalyRange = (0.0, 1.0) self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW) self.convertedDates = deque( [date2num(date) for date in self.dates], maxlen=WINDOW ) self.value = deque([0.0] * WINDOW, maxlen=WINDOW) self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW) self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW) self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW) actualPlot, = self._mainGraph.plot(self.dates, self.value) self.actualLine = actualPlot predictedPlot, = self._mainGraph.plot(self.dates, self.predicted) self.predictedLine = predictedPlot self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3) anomalyScorePlot, = self._anomalyGraph.plot( self.dates, self.anomalyScore, 'm' ) anomalyScorePlot.axes.set_ylim(anomalyRange) self.anomalyScoreLine = anomalyScorePlot anomalyLikelihoodPlot, = self._anomalyGraph.plot( self.dates, self.anomalyScore, 'r' ) anomalyLikelihoodPlot.axes.set_ylim(anomalyRange) self.anomalyLikelihoodLine = anomalyLikelihoodPlot self._anomalyGraph.legend( tuple(['anomaly score', 'anomaly likelihood']), loc=3 ) dateFormatter = DateFormatter('%m/%d %H:%M') self._mainGraph.xaxis.set_major_formatter(dateFormatter) self._anomalyGraph.xaxis.set_major_formatter(dateFormatter) self._mainGraph.relim() self._mainGraph.autoscale_view(True, True, True) self.linesInitialized = True def 
highlightChart(self, highlights, chart): for highlight in highlights: # Each highlight contains [start-index, stop-index, color, alpha] self._chartHighlights.append(chart.axvspan( self.convertedDates[highlight[0]], self.convertedDates[highlight[1]], color=highlight[2], alpha=highlight[3] )) def write(self, timestamp, value, predicted, anomalyScore): # We need the first timestamp to initialize the lines at the right X value, # so do that check first. if not self.linesInitialized: self.initializeLines(timestamp) anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability( value, anomalyScore, timestamp ) self.dates.append(timestamp) self.convertedDates.append(date2num(timestamp)) self.value.append(value) self.allValues.append(value) self.predicted.append(predicted) self.anomalyScore.append(anomalyScore) self.anomalyLikelihood.append(anomalyLikelihood) # Update main chart data self.actualLine.set_xdata(self.convertedDates) self.actualLine.set_ydata(self.value) self.predictedLine.set_xdata(self.convertedDates) self.predictedLine.set_ydata(self.predicted) # Update anomaly chart data self.anomalyScoreLine.set_xdata(self.convertedDates) self.anomalyScoreLine.set_ydata(self.anomalyScore) self.anomalyLikelihoodLine.set_xdata(self.convertedDates) self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood) # Remove previous highlighted regions for poly in self._chartHighlights: poly.remove() self._chartHighlights = [] weekends = extractWeekendHighlights(self.dates) anomalies = extractAnomalyIndices(self.anomalyLikelihood) # Highlight weekends in main chart self.highlightChart(weekends, self._mainGraph) # Highlight anomalies in anomaly chart self.highlightChart(anomalies, self._anomalyGraph) maxValue = max(self.allValues) self._mainGraph.relim() self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02)) self._mainGraph.relim() self._mainGraph.autoscale_view(True, scaley=False) self._anomalyGraph.relim() self._anomalyGraph.autoscale_view(True, True, True) plt.draw() def close(self): plt.ioff() plt.show() NuPICOutput.register(NuPICFileOutput) NuPICOutput.register(NuPICPlotOutput)
agpl-3.0
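The highlight extraction above (extractAnomalyIndices) does not depend on NuPIC or matplotlib. A pure-Python sketch of the same run-length scan, kept separate from the plotting code; the threshold constant mirrors the module.

ANOMALY_THRESHOLD = 0.9

def extract_anomaly_spans(likelihoods, threshold=ANOMALY_THRESHOLD):
    """Return (start, end) index pairs for every contiguous run of
    likelihood values at or above the threshold."""
    spans = []
    start = None
    for i, value in enumerate(likelihoods):
        if value >= threshold:
            if start is None:          # entering an anomalous run
                start = i
        elif start is not None:        # leaving an anomalous run
            spans.append((start, i))
            start = None
    if start is not None:              # run continues to the end of the data
        spans.append((start, len(likelihoods) - 1))
    return spans

print(extract_anomaly_spans([0.1, 0.95, 0.97, 0.2, 0.91]))  # [(1, 3), (4, 4)]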
vybstat/scikit-learn
sklearn/metrics/cluster/tests/test_unsupervised.py
230
2823
import numpy as np from scipy.sparse import csr_matrix from sklearn import datasets from sklearn.metrics.cluster.unsupervised import silhouette_score from sklearn.metrics import pairwise_distances from sklearn.utils.testing import assert_false, assert_almost_equal from sklearn.utils.testing import assert_raises_regexp def test_silhouette(): # Tests the Silhouette Coefficient. dataset = datasets.load_iris() X = dataset.data y = dataset.target D = pairwise_distances(X, metric='euclidean') # Given that the actual labels are used, we can assume that S would be # positive. silhouette = silhouette_score(D, y, metric='precomputed') assert(silhouette > 0) # Test without calculating D silhouette_metric = silhouette_score(X, y, metric='euclidean') assert_almost_equal(silhouette, silhouette_metric) # Test with sampling silhouette = silhouette_score(D, y, metric='precomputed', sample_size=int(X.shape[0] / 2), random_state=0) silhouette_metric = silhouette_score(X, y, metric='euclidean', sample_size=int(X.shape[0] / 2), random_state=0) assert(silhouette > 0) assert(silhouette_metric > 0) assert_almost_equal(silhouette_metric, silhouette) # Test with sparse X X_sparse = csr_matrix(X) D = pairwise_distances(X_sparse, metric='euclidean') silhouette = silhouette_score(D, y, metric='precomputed') assert(silhouette > 0) def test_no_nan(): # Assert Silhouette Coefficient != nan when there is 1 sample in a class. # This tests for the condition that caused issue 960. # Note that there is only one sample in cluster 0. This used to cause the # silhouette_score to return nan (see bug #960). labels = np.array([1, 0, 1, 1, 1]) # The distance matrix doesn't actually matter. D = np.random.RandomState(0).rand(len(labels), len(labels)) silhouette = silhouette_score(D, labels, metric='precomputed') assert_false(np.isnan(silhouette)) def test_correct_labelsize(): # Assert 1 < n_labels < n_samples dataset = datasets.load_iris() X = dataset.data # n_labels = n_samples y = np.arange(X.shape[0]) assert_raises_regexp(ValueError, 'Number of labels is %d\. Valid values are 2 ' 'to n_samples - 1 \(inclusive\)' % len(np.unique(y)), silhouette_score, X, y) # n_labels = 1 y = np.zeros(X.shape[0]) assert_raises_regexp(ValueError, 'Number of labels is %d\. Valid values are 2 ' 'to n_samples - 1 \(inclusive\)' % len(np.unique(y)), silhouette_score, X, y)
bsd-3-clause
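A short usage sketch of silhouette_score as exercised by these tests, on the iris data with KMeans labels; both the raw-feature and precomputed-distance call styles are shown.

from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, pairwise_distances

X = datasets.load_iris().data
labels = KMeans(n_clusters=3, random_state=0).fit_predict(X)

# Either pass the raw features with a metric...
print(silhouette_score(X, labels, metric='euclidean'))

# ...or a precomputed distance matrix, as the tests above do.
D = pairwise_distances(X, metric='euclidean')
print(silhouette_score(D, labels, metric='precomputed'))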
smjhnits/Praktikum_TU_D_16-17
Anfängerpraktikum/Protokolle/V207_Das_Kugelfallviskosimeter/Auswertung/Auswertung.py
1
6592
import numpy as np from scipy.stats import sem from uncertainties import ufloat import uncertainties.unumpy as unp import matplotlib.pyplot as plt from scipy.optimize import curve_fit daten = np.genfromtxt("Daten.txt", unpack = True) DurchmesserGr = np.array([1.561, 1.560, 1.560]) DurchmesserKl = np.array([1.543, 1.544, 1.543]) RadiusGr = DurchmesserGr / 2 RadiusKl = DurchmesserKl / 2 GewichtGr = np.array([4.96, 4.96, 4.96]) GewichtKl = np.array([4.45, 4.45, 4.45]) s = 1 / np.sqrt(3) Rgr = ufloat(np.mean(RadiusGr), np.std(RadiusGr, ddof = 1) * s) Rkl = ufloat(np.mean(RadiusKl), np.std(RadiusKl, ddof = 1) * s) Ggr = ufloat(np.mean(GewichtGr), np.std(GewichtGr, ddof = 1) * s) Gkl = ufloat(np.mean(GewichtKl), np.std(GewichtKl, ddof = 1) * s) print("Durchmesser große Kugel: ", np.mean(DurchmesserGr), np.std(DurchmesserGr, ddof = 1) *s) print("Durchmesser kleine Kugel: ", np.mean(DurchmesserKl), np.std(DurchmesserKl, ddof = 1) *s) print("Radius große Kugel: ", Rgr, " cm") print("Gewicht große Kugel: ", Ggr, " g") print("Radius kleine Kugel: ", Rkl, " cm") print("Gewicht kleine Kugel: ", Gkl, " g") print('\n') #Auslesen und Mitteln der Daten FallzeitKugel2 = ufloat(np.mean(daten[0]), np.std(daten[0], ddof = 1) * 1/np.sqrt(len(daten[0]))) FallzeitKugel1 = ufloat(np.mean(daten[1]), np.std(daten[1], ddof = 1) * 1/np.sqrt(len(daten[1]))) Temperaturen = daten[2] Temperaturen += 273.15 Messungen = np.array([daten[3], daten[4]]) Messungen = np.transpose(Messungen) print("Fallzeit kleine Kugel: ", FallzeitKugel2) print("Fallzeit große Kugel: ", FallzeitKugel1) print("Temperaturen: ", '\n', Temperaturen) print('\n') Mittelwerte = np.array([ np.mean(row) for row in Messungen ]) Fehler = np.array([1/np.sqrt(len(row)) * np.std(row, ddof=1) for row in Messungen]) kombiniert = np.array([ufloat(n, Fehler[i]) for i,n in enumerate(Mittelwerte)]) #print("Messung der Fallzeiten: ", '\n', unp.nominal_values(kombiniert)) print("Messung der Fallzeiten: ", '\n', kombiniert) print('\n') #Ermittlung der Kugeldichte und benötigte Daten DichteKl = Gkl / (4/3 * np.pi * Rkl**3) #DichteKl /= 1000 # Anpassung in g/cm^3 DichteGr = Ggr / (4/3 * np.pi * Rgr**3) #DichteGr /= 1000 # Anpassung in g/cm^3 DichteW = 0.998 DichteW_array = np.array([0.9957, 0.994, 0.9922, 0.9902, 0.9880, 0.9980, 0.9857, 0.9832, 0.9806, 0.9778]) Kkl = 0.07640 # in mPa * cm^3 / g print("Kugel 1 = große Kugel") print("Kugel 2 = kleine Kugel") print("Dichte der kleinen Kugel: ", DichteKl, " g/cm^3") print("Dichte der großen Kugel: ", DichteGr, " g/cm^3") print("Dichte Wasser: ", DichteW, " g/cm^3") print('\n') #Ermittlung von K Viskositätkl20 = (Kkl * (DichteKl - DichteW) * FallzeitKugel2) Kgr = Viskositätkl20 / ( (DichteGr - DichteW) * FallzeitKugel1) print("Viskosität kleiner Kugel: ", Viskositätkl20, " mPa s") print("Apparaturkonstante kleine Kugel: ", Kkl, " mPa s cm^3 7 g") print("Apparaturkonstante große Kugel: ", Kgr, " mPa s cm^3 7 g") print('\n') #Ermittlung der ViskositäTemperaturen Viskos = np.array([ Kgr * (DichteGr - DichteW_array[i]) * n for i,n in enumerate(kombiniert) ]) #print("Viskositäten: ",'\n', unp.nominal_values(Viskos)) print("Viskositäten: ",'\n', Viskos) print('\n') print("Temperaturen: ", '\n', Temperaturen) print('\n') Viskos *= 10**(-3) #Literaturwerte einspeichern ViskosLit = np.array([ 797.7, 653.1, 547.1, 466.8, 404.5, ]) ViskosLit *= 10**(-3) Temperature = np.array([ 30.0, 40.0 , 50.0, 60.0, 70.0, ]) Temperature += 273.15 print("Viskositäten Literaturwerte: ", '\n' , ViskosLit) print('\n') print("Temperaturen Literatur: ", 
'\n', Temperature) print('\n') ViskosLit *= 10**(-3) #Plot anfertigen def f(x, A, B): return A * np.exp( B / x) params, covariance = curve_fit(f, Temperaturen, unp.nominal_values(Viskos)) paramslit, covariancelit = curve_fit(f, Temperature, ViskosLit) x_plot = np.linspace(300, 350, num = 100) #Der Scheiß für Mareike Viskoslog = np.log(unp.nominal_values(Viskos)) Viskoslitlog = np.log(unp.nominal_values(ViskosLit)) def g(x, A, B): return A * 1/x + B paramslog, covariancelog = curve_fit(g, Temperaturen, Viskoslog) errorsExperimentell = np.sqrt(np.diag(covariancelog)) paramslitlog, covariancelitlog = curve_fit(g, Temperature, Viskoslitlog) errorsLit = np.sqrt(np.diag(covariancelitlog)) print("Parameter für Viskositäten Literatur: ", '\n', paramslit, " A in mPa s, B in K") print("Parameter mit linearer Regression", '\n', paramslitlog) print("Fehler der linearen Regression: ", '\n', errorsLit[0], errorsLit[1]) print('\n') # Plot mit V gegen 1/T plt.clf() plt.plot(1/Temperaturen, unp.log(unp.nominal_values(Viskos)), "bx", label = "Viskositäten") #plt.plot(1/x_plot, unp.log(f(x_plot, *params)), "r-", label = "Regressionskurve") plt.plot(1/Temperature, unp.log(ViskosLit), "gx", label = "Viskositäten Literatur") plt.plot(1/x_plot, g(x_plot, *paramslitlog), "k-", label = "Fit der Literaturwerte") plt.plot(1/x_plot, g(x_plot, *paramslog), "r-", label = "Linearer Fit") #fwfewf plt.grid(True, which = "both") plt.xlabel(r"$1/T \,\, in \,\, 1/K$") plt.ylabel(r"$ln(\eta)$ ") plt.legend(loc = 'best') plt.xlim(0.0029, 0.00333) #plt.yscale('log') plt.tight_layout() plt.savefig("Plot_T_1.pdf") print("Parameter für Viskositäten Experimentell: ", '\n', params, " A in mPa s, B in K") print("Parameter B : ", '\n', ufloat(paramslog[0], errorsExperimentell[0]),) ParameterA = ufloat(paramslog[1], errorsExperimentell[1]) print("Parameter A : ", '\n', unp.exp(ParameterA)) print('\n') #plt.show() #Abweichungen Literatur und Experimentell ViskosLit2 = np.array([ paramslit[0] * np.exp( paramslit[1] / n ) for n in Temperaturen]) Abweichungen = Viskos - ViskosLit2 print("Durchschnittliche Abweichung: ", np.mean(Abweichungen)) print('\n') #Geschwindigkeiten Strecke = 0.1 Geschwindigkeiten = Strecke / kombiniert Vcm = Geschwindigkeiten * 100 #print("Geschwindigkeiten: ", '\n', unp.nominal_values(Vcm) ) print("Geschwindigkeiten: Angaben in cm", '\n', Vcm) print('\n') #Reynoldszahl Reynolds = np.array([DichteW_array[i] * Vcm[i] * 2 * Rgr / 10 / n for i,n in enumerate(Viskos)]) #print("Reynoldszahl mit cm : ", '\n', unp.nominal_values(Reynolds)) print("Reynoldszahl große Kugel : ", '\n', Reynolds) print('\n') #Reynoldszahl kleine Kugel Viskositätkl20 *= 10**(-3) print("Fallzeit kleine Kugel: ", FallzeitKugel2) Reynoldskl = DichteW * Strecke * 100 * 2 * Rkl / 10 / (Viskositätkl20 * FallzeitKugel2) print("Reynoldszahl in cm kleine kugel : ", '\n' ,Reynoldskl)
mit
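The analysis script fits the Andrade relation eta = A * exp(B / T) by linearising it to ln(eta) = B * (1/T) + ln(A). A self-contained sketch of that fit with scipy.optimize.curve_fit on synthetic temperatures and viscosities; the numbers are invented, not the measured data.

import numpy as np
from scipy.optimize import curve_fit

# Synthetic temperatures (K) and viscosities (mPa s) following an Andrade
# law with a little noise, standing in for the measured values.
T = np.array([303.15, 313.15, 323.15, 333.15, 343.15])
rng = np.random.RandomState(0)
eta = 0.01 * np.exp(1500.0 / T) * (1 + 0.01 * rng.randn(T.size))

def linear_model(T, B, lnA):
    # linearised Andrade relation: ln(eta) = B / T + ln(A)
    return B / T + lnA

params, cov = curve_fit(linear_model, T, np.log(eta))
B, lnA = params
B_err, lnA_err = np.sqrt(np.diag(cov))
print("B = %.0f +/- %.0f K" % (B, B_err))
print("A = %.4f mPa s" % np.exp(lnA))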
Obus/scikit-learn
sklearn/tests/test_kernel_approximation.py
244
7588
import numpy as np from scipy.sparse import csr_matrix from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal, assert_raises from sklearn.utils.testing import assert_less_equal from sklearn.metrics.pairwise import kernel_metrics from sklearn.kernel_approximation import RBFSampler from sklearn.kernel_approximation import AdditiveChi2Sampler from sklearn.kernel_approximation import SkewedChi2Sampler from sklearn.kernel_approximation import Nystroem from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] def test_additive_chi2_sampler(): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # appreviations for easier formular X_ = X[:, np.newaxis, :] Y_ = Y[np.newaxis, :, :] large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = (large_kernel.sum(axis=2)) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_matrix(X)) Y_sp_trans = transform.transform(csr_matrix(Y)) assert_array_equal(X_trans, X_sp_trans.A) assert_array_equal(Y_trans, Y_sp_trans.A) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) # test error on invalid sample_steps transform = AdditiveChi2Sampler(sample_steps=4) assert_raises(ValueError, transform.fit, X) # test that the sample interval is set correctly sample_steps_available = [1, 2, 3] for sample_steps in sample_steps_available: # test that the sample_interval is initialized correctly transform = AdditiveChi2Sampler(sample_steps=sample_steps) assert_equal(transform.sample_interval, None) # test that the sample_interval is changed in the fit method transform.fit(X) assert_not_equal(transform.sample_interval_, None) # test that the sample_interval is set correctly sample_interval = 0.3 transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval) assert_equal(transform.sample_interval, sample_interval) transform.fit(X) assert_equal(transform.sample_interval_, sample_interval) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # appreviations for easier formular X_c = (X + c)[:, np.newaxis, :] Y_c = (Y + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) 
- np.log(X_c + Y_c)) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10. kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased np.abs(error, out=error) assert_less_equal(np.max(error), 0.1) # nothing too far off assert_less_equal(np.mean(error), 0.05) # mean is fairly close def test_input_validation(): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_matrix(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test callable kernel linear_kernel = lambda X, Y: np.dot(X, Y.T) trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert_true(np.all(np.isfinite(X_transformed))) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters besides gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=.1) nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=.1) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable.
rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem(kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={'log': kernel_log}).fit(X) assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
bsd-3-clause
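The test file above checks AdditiveChi2Sampler against the exact additive chi-squared kernel k(x, y) = sum_i 2 x_i y_i / (x_i + y_i). Here is a minimal sketch of that comparison outside the test suite, assuming a scikit-learn version that provides AdditiveChi2Sampler; the reported error is indicative only.

import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

rng = np.random.RandomState(0)
X = rng.random_sample((30, 10))
X /= X.sum(axis=1)[:, np.newaxis]              # histogram-like rows, as in the tests

# exact kernel, computed exactly as in test_additive_chi2_sampler
X_ = X[:, np.newaxis, :]
Y_ = X[np.newaxis, :, :]
exact = (2 * X_ * Y_ / (X_ + Y_)).sum(axis=2)

# explicit feature map: a plain dot product in the mapped space approximates it
X_map = AdditiveChi2Sampler(sample_steps=3).fit_transform(X)
approx = np.dot(X_map, X_map.T)

print(np.abs(exact - approx).max())            # small for this data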
jobovy/apogee-maps
py/plot_dust_gaia.py
1
2691
############################################################################### # plot_dust_gaia: plot the dust-map at X for Gaia ############################################################################### import sys import numpy import healpy import matplotlib matplotlib.use('Agg') from galpy.util import bovy_plot, bovy_coords import dust # nside to work at, 2048 is the max _NSIDE= 2048 def plot_dust_gaia(dist,plotname): # Load the dust map green15map= dust.load_combined(dist,nest=True,nside_out=_NSIDE) dm= dust.dist2distmod(dist) green15map[green15map == healpy.UNSEEN]= -1. print "%i are NaN" % (numpy.sum(numpy.isnan(green15map))) #theta, phi= healpy.pixelfunc.pix2ang(_NSIDE,numpy.arange(healpy.pixelfunc.nside2npix(_NSIDE)),nest=True) #print (numpy.pi/2.-theta)[numpy.isnan(green15map)]/numpy.pi*180. #print phi[numpy.isnan(green15map)]/numpy.pi*180. # plot it healpy.visufunc.mollview(green15map, nest=True, xsize=4000,min=0., max=round(10.*(20.-dm-0.68))/10., format=r'$%g$', cmap='gist_yarg', title="", unit='$A_G\,(\mathrm{mag})$') # Plot outline of Marshall et al. ls= numpy.linspace(-100.125,100.125,1001) healpy.visufunc.projplot(ls,ls*0.-10.125,'k--',lw=1.2,lonlat=True) healpy.visufunc.projplot(ls,ls*0.+10.125,'k--',lw=1.2,lonlat=True) bs= numpy.linspace(-10.125,10.125,1001) healpy.visufunc.projplot(bs*0.-100.125,bs,'k--',lw=1.2,lonlat=True) healpy.visufunc.projplot(bs*0.+100.125,bs,'k--',lw=1.2,lonlat=True) # Plot boundary of Green et al. map, dec > -30. ras= numpy.linspace(0.,360.,1001) decs= ras*0.-30. lbs= bovy_coords.radec_to_lb(ras,decs,degree=True) ls= lbs[:,0] bs= lbs[:,1] # rm those within Marshall et al. keepIndx= True-((ls > 259.875)*(numpy.fabs(bs) < 10.125) +(ls < 100.125)*(numpy.fabs(bs) < 10.125)) ls= ls[keepIndx] bs= bs[keepIndx] healpy.visufunc.projplot(ls,bs,'k-.',lw=1.2,lonlat=True) # Labels healpy.visufunc.projtext(350.,-8.,r'$\mathrm{Marshall\ et\ al.\ (2006)}$', lonlat=True,size=13.) healpy.visufunc.projtext(10.,-60.,r'$\mathrm{Drimmel\ et\ al.\ (2003)}$', lonlat=True,size=13.) healpy.visufunc.projtext(160.,40.,r'$\mathrm{Green\ et\ al.\ (2015)}$', lonlat=True,size=13.) bovy_plot.bovy_end_print(plotname) if __name__ == '__main__': plot_dust_gaia(float(sys.argv[1]),sys.argv[2])
bsd-3-clause
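The plotting script above depends on a local `dust` module, so it is not runnable on its own; in particular, `dust.dist2distmod` presumably implements the standard distance-modulus relation. A sketch of that helper under this assumption (the real module may differ):

import numpy as np

def dist2distmod(d_kpc):
    # mu = 5 * log10(d / 10 pc), with the distance given in kpc
    return 5.0 * np.log10(np.asarray(d_kpc) * 1000.0 / 10.0)

print(dist2distmod(5.0))   # roughly 13.5 mag at 5 kpc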
ningchi/scikit-learn
examples/text/mlcomp_sparse_document_classification.py
292
4498
""" ======================================================== Classification of text documents: using a MLComp dataset ======================================================== This is an example showing how the scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features instead of standard numpy arrays. The dataset used in this example is the 20 newsgroups dataset and should be downloaded from the http://mlcomp.org (free registration required): http://mlcomp.org/datasets/379 Once downloaded unzip the archive somewhere on your filesystem. For instance in:: % mkdir -p ~/data/mlcomp % cd ~/data/mlcomp % unzip /path/to/dataset-379-20news-18828_XXXXX.zip You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata`` and subfolders ``raw``, ``train`` and ``test`` holding the text documents organized by newsgroups. Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to the root folder holding the uncompressed archive:: % export MLCOMP_DATASETS_HOME="~/data/mlcomp" Then you are ready to run this example using your favorite python shell:: % ipython examples/mlcomp_sparse_document_classification.py """ # Author: Olivier Grisel <[email protected]> # License: BSD 3 clause from __future__ import print_function from time import time import sys import os import numpy as np import scipy.sparse as sp import pylab as pl from sklearn.datasets import load_mlcomp from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import SGDClassifier from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report from sklearn.naive_bayes import MultinomialNB print(__doc__) if 'MLCOMP_DATASETS_HOME' not in os.environ: print("MLCOMP_DATASETS_HOME not set; please follow the above instructions") sys.exit(0) # Load the training set print("Loading 20 newsgroups training set... ") news_train = load_mlcomp('20news-18828', 'train') print(news_train.DESCR) print("%d documents" % len(news_train.filenames)) print("%d categories" % len(news_train.target_names)) print("Extracting features from the dataset using a sparse vectorizer") t0 = time() vectorizer = TfidfVectorizer(encoding='latin1') X_train = vectorizer.fit_transform((open(f).read() for f in news_train.filenames)) print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X_train.shape) assert sp.issparse(X_train) y_train = news_train.target print("Loading 20 newsgroups test set... 
") news_test = load_mlcomp('20news-18828', 'test') t0 = time() print("done in %fs" % (time() - t0)) print("Predicting the labels of the test set...") print("%d documents" % len(news_test.filenames)) print("%d categories" % len(news_test.target_names)) print("Extracting features from the dataset using the same vectorizer") t0 = time() X_test = vectorizer.transform((open(f).read() for f in news_test.filenames)) y_test = news_test.target print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X_test.shape) ############################################################################### # Benchmark classifiers def benchmark(clf_class, params, name): print("parameters:", params) t0 = time() clf = clf_class(**params).fit(X_train, y_train) print("done in %fs" % (time() - t0)) if hasattr(clf, 'coef_'): print("Percentage of non zeros coef: %f" % (np.mean(clf.coef_ != 0) * 100)) print("Predicting the outcomes of the testing set") t0 = time() pred = clf.predict(X_test) print("done in %fs" % (time() - t0)) print("Classification report on test set for classifier:") print(clf) print() print(classification_report(y_test, pred, target_names=news_test.target_names)) cm = confusion_matrix(y_test, pred) print("Confusion matrix:") print(cm) # Show confusion matrix pl.matshow(cm) pl.title('Confusion matrix of the %s classifier' % name) pl.colorbar() print("Testbenching a linear classifier...") parameters = { 'loss': 'hinge', 'penalty': 'l2', 'n_iter': 50, 'alpha': 0.00001, 'fit_intercept': True, } benchmark(SGDClassifier, parameters, 'SGD') print("Testbenching a MultinomialNB classifier...") parameters = {'alpha': 0.01} benchmark(MultinomialNB, parameters, 'MultinomialNB') pl.show()
bsd-3-clause
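The example above assumes a dataset downloaded from mlcomp.org, which is no longer available. The same sparse bag-of-words plus linear-classifier workflow can be sketched with scikit-learn's built-in 20 newsgroups loader; this is a stand-in, not the original example, and the parameter values are illustrative.

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report

# download (or load from cache) the 20 newsgroups text data
train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'))

vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(train.data)     # scipy.sparse CSR matrix
X_test = vectorizer.transform(test.data)

clf = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-5).fit(X_train, train.target)
print(classification_report(test.target, clf.predict(X_test),
                            target_names=test.target_names))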
qifeigit/scikit-learn
sklearn/linear_model/tests/test_sgd.py
129
43401
import pickle import unittest import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_less from sklearn.utils.testing import raises from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises_regexp from sklearn import linear_model, datasets, metrics from sklearn.base import clone from sklearn.linear_model import SGDClassifier, SGDRegressor from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler class SparseSGDClassifier(SGDClassifier): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).fit(X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw) def decision_function(self, X): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).decision_function(X) def predict_proba(self, X): X = sp.csr_matrix(X) return super(SparseSGDClassifier, self).predict_proba(X) class SparseSGDRegressor(SGDRegressor): def fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.fit(self, X, y, *args, **kw) def partial_fit(self, X, y, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.partial_fit(self, X, y, *args, **kw) def decision_function(self, X, *args, **kw): X = sp.csr_matrix(X) return SGDRegressor.decision_function(self, X, *args, **kw) # Test Data # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2; string class labels X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5], [1, 1], [0.75, 0.5], [1.5, 1.5], [-1, -1], [0, -0.5], [1, -1]]) Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3 T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]]) true_result2 = ["one", "two", "three"] # test sample 3 X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1], [0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]]) Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) # test sample 4 - two more or less redundent feature groups X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0], [1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0], [0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1], [0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]]) Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2]) iris = datasets.load_iris() # test sample 5 - test sample 1 as binary classification problem X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) Y5 = [1, 1, 1, 2, 2, 2] true_result5 = [0, 1, 1] # Classification Test Case class CommonTest(object): def factory(self, **kwargs): if "random_state" not in kwargs: kwargs["random_state"] = 42 return self.factory_class(**kwargs) # a simple implementation of ASGD to use for testing # uses squared loss to find the gradient def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0): if weight_init is None: weights = np.zeros(X.shape[1]) else: weights = weight_init average_weights = np.zeros(X.shape[1]) intercept = intercept_init average_intercept = 0.0 decay = 1.0 # sparse data has a fixed decay of .01 if (isinstance(self, SparseSGDClassifierTestCase) or 
isinstance(self, SparseSGDRegressorTestCase)): decay = .01 for i, entry in enumerate(X): p = np.dot(entry, weights) p += intercept gradient = p - y[i] weights *= 1.0 - (eta * alpha) weights += -(eta * gradient * entry) intercept += -(eta * gradient) * decay average_weights *= i average_weights += weights average_weights /= i + 1.0 average_intercept *= i average_intercept += intercept average_intercept /= i + 1.0 return average_weights, average_intercept def _test_warm_start(self, X, Y, lr): # Test that explicit warm restart... clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf.fit(X, Y) clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False, learning_rate=lr) clf2.fit(X, Y, coef_init=clf.coef_.copy(), intercept_init=clf.intercept_.copy()) # ... and implicit warm restart are equivalent. clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False, warm_start=True, learning_rate=lr) clf3.fit(X, Y) assert_equal(clf3.t_, clf.t_) assert_array_almost_equal(clf3.coef_, clf.coef_) clf3.set_params(alpha=0.001) clf3.fit(X, Y) assert_equal(clf3.t_, clf2.t_) assert_array_almost_equal(clf3.coef_, clf2.coef_) def test_warm_start_constant(self): self._test_warm_start(X, Y, "constant") def test_warm_start_invscaling(self): self._test_warm_start(X, Y, "invscaling") def test_warm_start_optimal(self): self._test_warm_start(X, Y, "optimal") def test_input_format(self): # Input format tests. clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) clf.fit(X, Y) Y_ = np.array(Y)[:, np.newaxis] Y_ = np.c_[Y_, Y_] assert_raises(ValueError, clf.fit, X, Y_) def test_clone(self): # Test whether clone works ok. clf = self.factory(alpha=0.01, n_iter=5, penalty='l1') clf = clone(clf) clf.set_params(penalty='l2') clf.fit(X, Y) clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2') clf2.fit(X, Y) assert_array_equal(clf.coef_, clf2.coef_) def test_plain_has_no_average_attr(self): clf = self.factory(average=True, eta0=.01) clf.fit(X, Y) assert_true(hasattr(clf, 'average_coef_')) assert_true(hasattr(clf, 'average_intercept_')) assert_true(hasattr(clf, 'standard_intercept_')) assert_true(hasattr(clf, 'standard_coef_')) clf = self.factory() clf.fit(X, Y) assert_false(hasattr(clf, 'average_coef_')) assert_false(hasattr(clf, 'average_intercept_')) assert_false(hasattr(clf, 'standard_intercept_')) assert_false(hasattr(clf, 'standard_coef_')) def test_late_onset_averaging_not_reached(self): clf1 = self.factory(average=600) clf2 = self.factory() for _ in range(100): if isinstance(clf1, SGDClassifier): clf1.partial_fit(X, Y, classes=np.unique(Y)) clf2.partial_fit(X, Y, classes=np.unique(Y)) else: clf1.partial_fit(X, Y) clf2.partial_fit(X, Y) assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16) assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16) def test_late_onset_averaging_reached(self): eta0 = .001 alpha = .0001 Y_encode = np.array(Y) Y_encode[Y_encode == 1] = -1.0 Y_encode[Y_encode == 2] = 1.0 clf1 = self.factory(average=7, learning_rate="constant", loss='squared_loss', eta0=eta0, alpha=alpha, n_iter=2, shuffle=False) clf2 = self.factory(average=0, learning_rate="constant", loss='squared_loss', eta0=eta0, alpha=alpha, n_iter=1, shuffle=False) clf1.fit(X, Y_encode) clf2.fit(X, Y_encode) average_weights, average_intercept = \ self.asgd(X, Y_encode, eta0, alpha, weight_init=clf2.coef_.ravel(), intercept_init=clf2.intercept_) assert_array_almost_equal(clf1.coef_.ravel(), average_weights.ravel(), decimal=16) assert_almost_equal(clf1.intercept_, 
average_intercept, decimal=16) class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest): """Test suite for the dense representation variant of SGD""" factory_class = SGDClassifier def test_sgd(self): # Check that SGD gives any results :-) for loss in ("hinge", "squared_hinge", "log", "modified_huber"): clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True, loss=loss, n_iter=10, shuffle=True) clf.fit(X, Y) # assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7) assert_array_equal(clf.predict(T), true_result) @raises(ValueError) def test_sgd_bad_l1_ratio(self): # Check whether expected ValueError on bad l1_ratio self.factory(l1_ratio=1.1) @raises(ValueError) def test_sgd_bad_learning_rate_schedule(self): # Check whether expected ValueError on bad learning_rate self.factory(learning_rate="<unknown>") @raises(ValueError) def test_sgd_bad_eta0(self): # Check whether expected ValueError on bad eta0 self.factory(eta0=0, learning_rate="constant") @raises(ValueError) def test_sgd_bad_alpha(self): # Check whether expected ValueError on bad alpha self.factory(alpha=-.1) @raises(ValueError) def test_sgd_bad_penalty(self): # Check whether expected ValueError on bad penalty self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): # Check whether expected ValueError on bad loss self.factory(loss="foobar") @raises(ValueError) def test_sgd_n_iter_param(self): # Test parameter validity check self.factory(n_iter=-10000) @raises(ValueError) def test_sgd_shuffle_param(self): # Test parameter validity check self.factory(shuffle="false") @raises(TypeError) def test_argument_coef(self): # Checks coef_init not allowed as model argument (only fit) # Provided coef_ does not match dataset. self.factory(coef_init=np.zeros((3,))).fit(X, Y) @raises(ValueError) def test_provide_coef(self): # Checks coef_init shape for the warm starts # Provided coef_ does not match dataset. self.factory().fit(X, Y, coef_init=np.zeros((3,))) @raises(ValueError) def test_set_intercept(self): # Checks intercept_ shape for the warm starts # Provided intercept_ does not match dataset. self.factory().fit(X, Y, intercept_init=np.zeros((3,))) def test_set_intercept_binary(self): # Checks intercept_ shape for the warm starts in binary case self.factory().fit(X5, Y5, intercept_init=0) def test_average_binary_computed_correctly(self): # Checks the SGDClassifier correctly computes the average weights eta = .1 alpha = 2. n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) # simple linear function without noise y = np.dot(X, w) y = np.sign(y) clf.fit(X, y) average_weights, average_intercept = self.asgd(X, y, eta, alpha) average_weights = average_weights.reshape(1, -1) assert_array_almost_equal(clf.coef_, average_weights, decimal=14) assert_almost_equal(clf.intercept_, average_intercept, decimal=14) def test_set_intercept_to_intercept(self): # Checks intercept_ shape consistency for the warm starts # Inconsistent intercept_ shape. 
clf = self.factory().fit(X5, Y5) self.factory().fit(X5, Y5, intercept_init=clf.intercept_) clf = self.factory().fit(X, Y) self.factory().fit(X, Y, intercept_init=clf.intercept_) @raises(ValueError) def test_sgd_at_least_two_labels(self): # Target must have at least two labels self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9)) def test_partial_fit_weight_class_balanced(self): # partial_fit with class_weight='balanced' not supported""" assert_raises_regexp(ValueError, "class_weight 'balanced' is not supported for " "partial_fit. In order to use 'balanced' weights, " "use compute_class_weight\('balanced', classes, y\). " "In place of y you can us a large enough sample " "of the full training set target to properly " "estimate the class frequency distributions. " "Pass the resulting weights as the class_weight " "parameter.", self.factory(class_weight='balanced').partial_fit, X, Y, classes=np.unique(Y)) def test_sgd_multiclass(self): # Multi-class test case clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_average(self): eta = .001 alpha = .01 # Multi-class average test case clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) np_Y2 = np.array(Y2) clf.fit(X2, np_Y2) classes = np.unique(np_Y2) for i, cl in enumerate(classes): y_i = np.ones(np_Y2.shape[0]) y_i[np_Y2 != cl] = -1 average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha) assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16) assert_almost_equal(average_intercept, clf.intercept_[i], decimal=16) def test_sgd_multiclass_with_init_coef(self): # Multi-class test case clf = self.factory(alpha=0.01, n_iter=20) clf.fit(X2, Y2, coef_init=np.zeros((3, 2)), intercept_init=np.zeros(3)) assert_equal(clf.coef_.shape, (3, 2)) assert_true(clf.intercept_.shape, (3,)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_sgd_multiclass_njobs(self): # Multi-class test case with multi-core support clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2) assert_equal(clf.coef_.shape, (3, 2)) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) pred = clf.predict(T2) assert_array_equal(pred, true_result2) def test_set_coef_multiclass(self): # Checks coef_init and intercept_init shape for for multi-class # problems # Provided coef_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2))) # Provided coef_ does match dataset clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2))) # Provided intercept_ does not match dataset clf = self.factory() assert_raises(ValueError, clf.fit, X2, Y2, intercept_init=np.zeros((1,))) # Provided intercept_ does match dataset. clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,))) def test_sgd_proba(self): # Check SGD.predict_proba # Hinge loss does not allow for conditional prob estimate. # We cannot use the factory here, because it defines predict_proba # anyway. 
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y) assert_false(hasattr(clf, "predict_proba")) assert_false(hasattr(clf, "predict_log_proba")) # log and modified_huber losses can output probability estimates # binary case for loss in ["log", "modified_huber"]: clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10) clf.fit(X, Y) p = clf.predict_proba([3, 2]) assert_true(p[0, 1] > 0.5) p = clf.predict_proba([-1, -1]) assert_true(p[0, 1] < 0.5) p = clf.predict_log_proba([3, 2]) assert_true(p[0, 1] > p[0, 0]) p = clf.predict_log_proba([-1, -1]) assert_true(p[0, 1] < p[0, 0]) # log loss multiclass probability estimates clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2) d = clf.decision_function([[.1, -.1], [.3, .2]]) p = clf.predict_proba([[.1, -.1], [.3, .2]]) assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1)) assert_almost_equal(p[0].sum(), 1) assert_true(np.all(p[0] >= 0)) p = clf.predict_proba([-1, -1]) d = clf.decision_function([-1, -1]) assert_array_equal(np.argsort(p[0]), np.argsort(d[0])) l = clf.predict_log_proba([3, 2]) p = clf.predict_proba([3, 2]) assert_array_almost_equal(np.log(p), l) l = clf.predict_log_proba([-1, -1]) p = clf.predict_proba([-1, -1]) assert_array_almost_equal(np.log(p), l) # Modified Huber multiclass probability estimates; requires a separate # test because the hard zero/one probabilities may destroy the # ordering present in decision_function output. clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10) clf.fit(X2, Y2) d = clf.decision_function([3, 2]) p = clf.predict_proba([3, 2]) if not isinstance(self, SparseSGDClassifierTestCase): assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1)) else: # XXX the sparse test gets a different X2 (?) assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1)) # the following sample produces decision_function values < -1, # which would cause naive normalization to fail (see comment # in SGDClassifier.predict_proba) x = X.mean(axis=0) d = clf.decision_function(x) if np.all(d < -1): # XXX not true in sparse test case (why?) p = clf.predict_proba(x) assert_array_almost_equal(p[0], [1 / 3.] * 3) def test_sgd_l1(self): # Test L1 regularization n = len(X4) rng = np.random.RandomState(13) idx = np.arange(n) rng.shuffle(idx) X = X4[idx, :] Y = Y4[idx] clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False, n_iter=2000, shuffle=False) clf.fit(X, Y) assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,))) pred = clf.predict(X) assert_array_equal(pred, Y) # test sparsify with dense inputs clf.sparsify() assert_true(sp.issparse(clf.coef_)) pred = clf.predict(X) assert_array_equal(pred, Y) # pickle and unpickle with sparse coef_ clf = pickle.loads(pickle.dumps(clf)) assert_true(sp.issparse(clf.coef_)) pred = clf.predict(X) assert_array_equal(pred, Y) def test_class_weights(self): # Test class weights. X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight=None) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False, class_weight={1: 0.001}) clf.fit(X, y) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) def test_equal_class_weight(self): # Test if equal class weights approx. equals no class weights. 
X = [[1, 0], [1, 0], [0, 1], [0, 1]] y = [0, 0, 1, 1] clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None) clf.fit(X, y) X = [[1, 0], [0, 1]] y = [0, 1] clf_weighted = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5, 1: 0.5}) clf_weighted.fit(X, y) # should be similar up to some epsilon due to learning rate schedule assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2) @raises(ValueError) def test_wrong_class_weight_label(self): # ValueError due to not existing class label. clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5}) clf.fit(X, Y) @raises(ValueError) def test_wrong_class_weight_format(self): # ValueError due to wrong class_weight argument type. clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5]) clf.fit(X, Y) def test_weights_multiplied(self): # Tests that class_weight and sample_weight are multiplicative class_weights = {1: .6, 2: .3} sample_weights = np.random.random(Y4.shape[0]) multiplied_together = np.copy(sample_weights) multiplied_together[Y4 == 1] *= class_weights[1] multiplied_together[Y4 == 2] *= class_weights[2] clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights) clf2 = self.factory(alpha=0.1, n_iter=20) clf1.fit(X4, Y4, sample_weight=sample_weights) clf2.fit(X4, Y4, sample_weight=multiplied_together) assert_almost_equal(clf1.coef_, clf2.coef_) def test_balanced_weight(self): # Test class weights for imbalanced data""" # compute reference metrics on iris dataset that is quite balanced by # default X, y = iris.data, iris.target X = scale(X) idx = np.arange(X.shape[0]) rng = np.random.RandomState(6) rng.shuffle(idx) X = X[idx] y = y[idx] clf = self.factory(alpha=0.0001, n_iter=1000, class_weight=None, shuffle=False).fit(X, y) assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96, decimal=1) # make the same prediction using balanced class_weight clf_balanced = self.factory(alpha=0.0001, n_iter=1000, class_weight="balanced", shuffle=False).fit(X, y) assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96, decimal=1) # Make sure that in the balanced case it does not change anything # to use "balanced" assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6) # build an very very imbalanced dataset out of iris data X_0 = X[y == 0, :] y_0 = y[y == 0] X_imbalanced = np.vstack([X] + [X_0] * 10) y_imbalanced = np.concatenate([y] + [y_0] * 10) # fit a model on the imbalanced data without class weight info clf = self.factory(n_iter=1000, class_weight=None, shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96) # fit a model with balanced class_weight enabled clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96) # fit another using a fit parameter override clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False) clf.fit(X_imbalanced, y_imbalanced) y_pred = clf.predict(X) assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96) def test_sample_weights(self): # Test weights on individual samples X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = [1, 1, 1, -1, -1] clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) clf.fit(X, y) assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1])) # we give a small weights to class 1 clf.fit(X, y, 
sample_weight=[0.001] * 3 + [1] * 2) # now the hyperplane should rotate clock-wise and # the prediction on this point should shift assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1])) @raises(ValueError) def test_wrong_sample_weights(self): # Test if ValueError is raised if sample_weight has wrong shape clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False) # provided sample_weight too long clf.fit(X, Y, sample_weight=np.arange(7)) @raises(ValueError) def test_partial_fit_exception(self): clf = self.factory(alpha=0.01) # classes was not specified clf.partial_fit(X3, Y3) def test_partial_fit_binary(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y) clf.partial_fit(X[:third], Y[:third], classes=classes) assert_equal(clf.coef_.shape, (1, X.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([0, 0]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) y_pred = clf.predict(T) assert_array_equal(y_pred, true_result) def test_partial_fit_multiclass(self): third = X2.shape[0] // 3 clf = self.factory(alpha=0.01) classes = np.unique(Y2) clf.partial_fit(X2[:third], Y2[:third], classes=classes) assert_equal(clf.coef_.shape, (3, X2.shape[1])) assert_equal(clf.intercept_.shape, (3,)) assert_equal(clf.decision_function([0, 0]).shape, (1, 3)) id1 = id(clf.coef_.data) clf.partial_fit(X2[third:], Y2[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def test_fit_then_partial_fit(self): # Partial_fit should work after initial fit in the multiclass case. # Non-regression test for #2496; fit would previously produce a # Fortran-ordered coef_ that subsequent partial_fit couldn't handle. clf = self.factory() clf.fit(X2, Y2) clf.partial_fit(X2, Y2) # no exception here def _test_partial_fit_equal_fit(self, lr): for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)): clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2, learning_rate=lr, shuffle=False) clf.fit(X_, Y_) y_pred = clf.decision_function(T_) t = clf.t_ classes = np.unique(Y_) clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X_, Y_, classes=classes) y_pred2 = clf.decision_function(T_) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): self._test_partial_fit_equal_fit("invscaling") def test_regression_losses(self): clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.1, loss="squared_epsilon_insensitive") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, loss="huber") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01, loss="squared_loss") clf.fit(X, Y) assert_equal(1.0, np.mean(clf.predict(X) == Y)) def test_warm_start_multiclass(self): self._test_warm_start(X2, Y2, "optimal") def test_multiple_fit(self): # Test multiple calls of fit w/ different shaped inputs. 
clf = self.factory(alpha=0.01, n_iter=5, shuffle=False) clf.fit(X, Y) assert_true(hasattr(clf, "coef_")) # Non-regression test: try fitting with a different label set. y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)] clf.fit(X[:, :-1], y) class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase): """Run exactly the same tests using the sparse representation variant""" factory_class = SparseSGDClassifier ############################################################################### # Regression Test Case class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest): """Test suite for the dense representation variant of SGD""" factory_class = SGDRegressor def test_sgd(self): # Check that SGD gives any results. clf = self.factory(alpha=0.1, n_iter=2, fit_intercept=False) clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2]) assert_equal(clf.coef_[0], clf.coef_[1]) @raises(ValueError) def test_sgd_bad_penalty(self): # Check whether expected ValueError on bad penalty self.factory(penalty='foobar', l1_ratio=0.85) @raises(ValueError) def test_sgd_bad_loss(self): # Check whether expected ValueError on bad loss self.factory(loss="foobar") def test_sgd_averaged_computed_correctly(self): # Tests the average regressor matches the naive implementation eta = .001 alpha = .01 n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) # simple linear function without noise y = np.dot(X, w) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) clf.fit(X, y) average_weights, average_intercept = self.asgd(X, y, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16) def test_sgd_averaged_partial_fit(self): # Tests whether the partial fit yields the same average as the fit eta = .001 alpha = .01 n_samples = 20 n_features = 10 rng = np.random.RandomState(0) X = rng.normal(size=(n_samples, n_features)) w = rng.normal(size=n_features) # simple linear function without noise y = np.dot(X, w) clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)]) clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):]) average_weights, average_intercept = self.asgd(X, y, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16) def test_average_sparse(self): # Checks the average weights on data with 0s eta = .001 alpha = .01 clf = self.factory(loss='squared_loss', learning_rate='constant', eta0=eta, alpha=alpha, fit_intercept=True, n_iter=1, average=True, shuffle=False) n_samples = Y3.shape[0] clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)]) clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):]) average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha) assert_array_almost_equal(clf.coef_, average_weights, decimal=16) assert_almost_equal(clf.intercept_, average_intercept, decimal=16) def test_sgd_least_squares_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, 
fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_sgd_epsilon_insensitive(self): xmin, xmax = -5, 5 n_samples = 100 X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.99) # simple linear function with noise y = 0.5 * X.ravel() \ + np.random.randn(n_samples, 1).ravel() clf = self.factory(loss='epsilon_insensitive', epsilon=0.01, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_true(score > 0.5) def test_sgd_huber_fit(self): xmin, xmax = -5, 5 n_samples = 100 rng = np.random.RandomState(0) X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1) # simple linear function without noise y = 0.5 * X.ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.99) # simple linear function with noise y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel() clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20, fit_intercept=False) clf.fit(X, y) score = clf.score(X, y) assert_greater(score, 0.5) def test_elasticnet_convergence(self): # Check that the SGD output is consistent with coordinate descent n_samples, n_features = 1000, 5 rng = np.random.RandomState(0) X = np.random.randn(n_samples, n_features) # ground_truth linear model that generate y from X and to which the # models should converge if the regularizer would be set to 0.0 ground_truth_coef = rng.randn(n_features) y = np.dot(X, ground_truth_coef) # XXX: alpha = 0.1 seems to cause convergence problems for alpha in [0.01, 0.001]: for l1_ratio in [0.5, 0.8, 1.0]: cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) cd.fit(X, y) sgd = self.factory(penalty='elasticnet', n_iter=50, alpha=alpha, l1_ratio=l1_ratio, fit_intercept=False) sgd.fit(X, y) err_msg = ("cd and sgd did not converge to comparable " "results for alpha=%f and l1_ratio=%f" % (alpha, l1_ratio)) assert_almost_equal(cd.coef_, sgd.coef_, decimal=2, err_msg=err_msg) def test_partial_fit(self): third = X.shape[0] // 3 clf = self.factory(alpha=0.01) clf.partial_fit(X[:third], Y[:third]) assert_equal(clf.coef_.shape, (X.shape[1], )) assert_equal(clf.intercept_.shape, (1,)) assert_equal(clf.decision_function([0, 0]).shape, (1, )) id1 = id(clf.coef_.data) clf.partial_fit(X[third:], Y[third:]) id2 = id(clf.coef_.data) # check that coef_ haven't been re-allocated assert_true(id1, id2) def _test_partial_fit_equal_fit(self, lr): clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01, learning_rate=lr, shuffle=False) clf.fit(X, Y) y_pred = clf.predict(T) t = clf.t_ clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr, shuffle=False) for i in range(2): clf.partial_fit(X, Y) y_pred2 = clf.predict(T) assert_equal(clf.t_, t) assert_array_almost_equal(y_pred, y_pred2, decimal=2) def test_partial_fit_equal_fit_constant(self): self._test_partial_fit_equal_fit("constant") def test_partial_fit_equal_fit_optimal(self): self._test_partial_fit_equal_fit("optimal") def test_partial_fit_equal_fit_invscaling(self): 
self._test_partial_fit_equal_fit("invscaling") def test_loss_function_epsilon(self): clf = self.factory(epsilon=0.9) clf.set_params(epsilon=0.1) assert clf.loss_functions['huber'][1] == 0.1 class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase): # Run exactly the same tests using the sparse representation variant factory_class = SparseSGDRegressor def test_l1_ratio(): # Test if l1 ratio extremes match L1 and L2 penalty settings. X, y = datasets.make_classification(n_samples=1000, n_features=100, n_informative=20, random_state=1234) # test if elasticnet with l1_ratio near 1 gives same result as pure l1 est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.9999999999, random_state=42).fit(X, y) est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y) assert_array_almost_equal(est_en.coef_, est_l1.coef_) # test if elasticnet with l1_ratio near 0 gives same result as pure l2 est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.0000000001, random_state=42).fit(X, y) est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y) assert_array_almost_equal(est_en.coef_, est_l2.coef_) def test_underflow_or_overlow(): with np.errstate(all='raise'): # Generate some weird data with hugely unscaled features rng = np.random.RandomState(0) n_samples = 100 n_features = 10 X = rng.normal(size=(n_samples, n_features)) X[:, :2] *= 1e300 assert_true(np.isfinite(X).all()) # Use MinMaxScaler to scale the data without introducing a numerical # instability (computing the standard deviation naively is not possible # on this data) X_scaled = MinMaxScaler().fit_transform(X) assert_true(np.isfinite(X_scaled).all()) # Define a ground truth on the scaled data ground_truth = rng.normal(size=n_features) y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32) assert_array_equal(np.unique(y), [0, 1]) model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500) # smoke test: model is stable on scaled data model.fit(X_scaled, y) assert_true(np.isfinite(model.coef_).all()) # model is numerically unstable on unscaled data msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*" " Scaling input data with StandardScaler or MinMaxScaler" " might help.") assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y) def test_numerical_stability_large_gradient(): # Non regression test case for numerical stability on scaled problems # where the gradient can still explode with some losses model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True, penalty='elasticnet', l1_ratio=0.3, alpha=0.01, eta0=0.001, random_state=0) with np.errstate(all='raise'): model.fit(iris.data, iris.target) assert_true(np.isfinite(model.coef_).all()) def test_large_regularization(): # Non regression tests for numerical stability issues caused by large # regularization parameters for penalty in ['l2', 'l1', 'elasticnet']: model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1, n_iter=5, penalty=penalty, shuffle=False) with np.errstate(all='raise'): model.fit(iris.data, iris.target) assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
bsd-3-clause
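The `asgd` helper in the test suite above is a reference implementation of averaged SGD with squared loss, used to check the `average=True` behaviour. The recurrence it implements, isolated as a small numpy sketch for the dense case (no sparse decay factor):

import numpy as np

def asgd_squared_loss(X, y, eta, alpha):
    # plain SGD step on 0.5 * (w.x + b - y)^2 with an L2 penalty, plus a
    # running average of the iterates (the quantity the estimators expose
    # as average_coef_ / average_intercept_)
    w, b = np.zeros(X.shape[1]), 0.0
    w_avg, b_avg = np.zeros(X.shape[1]), 0.0
    for i, x_i in enumerate(X):
        grad = np.dot(x_i, w) + b - y[i]
        w = w * (1.0 - eta * alpha) - eta * grad * x_i
        b -= eta * grad
        w_avg = (w_avg * i + w) / (i + 1.0)
        b_avg = (b_avg * i + b) / (i + 1.0)
    return w_avg, b_avg

rng = np.random.RandomState(0)
X = rng.normal(size=(20, 10))
y = np.dot(X, rng.normal(size=10))
print(asgd_squared_loss(X, y, eta=0.001, alpha=0.01))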
chdhr-harshal/MCMonitor
src/python/experiments/grid_nodes.py
1
2175
#!/usr/local/bin/python """ Grid graph nodes objective evolution with increasing k """ from __future__ import division import sys sys.path.insert(1, '..') import __builtin__ import os from MarkovChain import * from MarkovChain.node_objectives import * import networkx as nx import pandas as pd PLOTS_DATA_DIR = "/home/grad3/harshal/Desktop/MCMonitor/Plots_data/" dataframe_rows = [] def get_objective_evolution(method, k, iteration): rows = get_evolution(method, k) for row in rows: row['iteration'] = iteration global dataframe_rows dataframe_rows += rows df = pd.DataFrame(dataframe_rows) df.to_csv(PLOTS_DATA_DIR + "grid_nodes_k_objective_evolution.csv.gz", sep=",", header=True, index=False, compression="gzip") if __name__ == "__main__": G = nx.grid_2d_graph(100, 10, create_using=nx.DiGraph()) G = nx.convert_node_labels_to_integers(G) G = nx.stochastic_graph(G, weight='weight') num_nodes = len(G) num_items = len(G) k = 50 item_distributions = ['uniform', 'direct', 'inverse', 'ego'] for item_distribution in item_distributions: if item_distribution == 'ego': iterations = 10 else: iterations = 1 for iteration in xrange(iterations): print "Evaluating item distribution {}".format(item_distribution) __builtin__.mc = MarkovChain(num_nodes=num_nodes, num_items=num_items, item_distribution=item_distribution, G=G) print "Starting evaluation of methods" methods = [random_nodes, highest_item_nodes, highest_closeness_centrality_nodes, highest_in_degree_centrality_nodes, highest_in_probability_nodes, highest_betweenness_centrality_nodes, smart_greedy_parallel] for method in methods: print "Evaluating method {}".format(method.func_name) get_objective_evolution(method, k, iteration)
mit
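The experiment above turns a directed 100x10 grid into a row-stochastic transition structure before handing it to the local MarkovChain package. A small sketch of just that graph construction, assuming a NetworkX version that still ships `stochastic_graph` (the MarkovChain package itself is not public):

import networkx as nx

G = nx.grid_2d_graph(4, 3, create_using=nx.DiGraph())   # small grid for illustration
G = nx.convert_node_labels_to_integers(G)
G = nx.stochastic_graph(G, weight='weight')             # normalise out-edge weights

# every node's outgoing weights now sum to 1, i.e. rows of a transition matrix
print(sum(d['weight'] for _, _, d in G.out_edges(0, data=True)))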
blink1073/scikit-image
doc/examples/features_detection/plot_local_binary_pattern.py
12
6776
""" =============================================== Local Binary Pattern for texture classification =============================================== In this example, we will see how to classify textures based on LBP (Local Binary Pattern). LBP looks at points surrounding a central point and tests whether the surrounding points are greater than or less than the central point (i.e. gives a binary result). Before trying out LBP on an image, it helps to look at a schematic of LBPs. The below code is just used to plot the schematic. """ from __future__ import print_function import numpy as np import matplotlib.pyplot as plt METHOD = 'uniform' plt.rcParams['font.size'] = 9 def plot_circle(ax, center, radius, color): circle = plt.Circle(center, radius, facecolor=color, edgecolor='0.5') ax.add_patch(circle) def plot_lbp_model(ax, binary_values): """Draw the schematic for a local binary pattern.""" # Geometry spec theta = np.deg2rad(45) R = 1 r = 0.15 w = 1.5 gray = '0.5' # Draw the central pixel. plot_circle(ax, (0, 0), radius=r, color=gray) # Draw the surrounding pixels. for i, facecolor in enumerate(binary_values): x = R * np.cos(i * theta) y = R * np.sin(i * theta) plot_circle(ax, (x, y), radius=r, color=str(facecolor)) # Draw the pixel grid. for x in np.linspace(-w, w, 4): ax.axvline(x, color=gray) ax.axhline(x, color=gray) # Tweak the layout. ax.axis('image') ax.axis('off') size = w + 0.2 ax.set_xlim(-size, size) ax.set_ylim(-size, size) fig, axes = plt.subplots(ncols=5, figsize=(7, 2)) titles = ['flat', 'flat', 'edge', 'corner', 'non-uniform'] binary_patterns = [np.zeros(8), np.ones(8), np.hstack([np.ones(4), np.zeros(4)]), np.hstack([np.zeros(3), np.ones(5)]), [1, 0, 0, 1, 1, 1, 0, 0]] for ax, values, name in zip(axes, binary_patterns, titles): plot_lbp_model(ax, values) ax.set_title(name) """ .. image:: PLOT2RST.current_figure The figure above shows example results with black (or white) representing pixels that are less (or more) intense than the central pixel. When surrounding pixels are all black or all white, then that image region is flat (i.e. featureless). Groups of continuous black or white pixels are considered "uniform" patterns that can be interpreted as corners or edges. If pixels switch back-and-forth between black and white pixels, the pattern is considered "non-uniform". When using LBP to detect texture, you measure a collection of LBPs over an image patch and look at the distribution of these LBPs. Lets apply LBP to a brick texture. 
""" from skimage.transform import rotate from skimage.feature import local_binary_pattern from skimage import data from skimage.color import label2rgb # settings for LBP radius = 3 n_points = 8 * radius def overlay_labels(image, lbp, labels): mask = np.logical_or.reduce([lbp == each for each in labels]) return label2rgb(mask, image=image, bg_label=0, alpha=0.5) def highlight_bars(bars, indexes): for i in indexes: bars[i].set_facecolor('r') image = data.load('brick.png') lbp = local_binary_pattern(image, n_points, radius, METHOD) def hist(ax, lbp): n_bins = lbp.max() + 1 return ax.hist(lbp.ravel(), normed=True, bins=n_bins, range=(0, n_bins), facecolor='0.5') # plot histograms of LBP of textures fig, (ax_img, ax_hist) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6)) plt.gray() titles = ('edge', 'flat', 'corner') w = width = radius - 1 edge_labels = range(n_points // 2 - w, n_points // 2 + w + 1) flat_labels = list(range(0, w + 1)) + list(range(n_points - w, n_points + 2)) i_14 = n_points // 4 # 1/4th of the histogram i_34 = 3 * (n_points // 4) # 3/4th of the histogram corner_labels = (list(range(i_14 - w, i_14 + w + 1)) + list(range(i_34 - w, i_34 + w + 1))) label_sets = (edge_labels, flat_labels, corner_labels) for ax, labels in zip(ax_img, label_sets): ax.imshow(overlay_labels(image, lbp, labels)) for ax, labels, name in zip(ax_hist, label_sets, titles): counts, _, bars = hist(ax, lbp) highlight_bars(bars, labels) ax.set_ylim(ymax=np.max(counts[:-1])) ax.set_xlim(xmax=n_points + 2) ax.set_title(name) ax_hist[0].set_ylabel('Percentage') for ax in ax_img: ax.axis('off') """ .. image:: PLOT2RST.current_figure The above plot highlights flat, edge-like, and corner-like regions of the image. The histogram of the LBP result is a good measure to classify textures. Here, we test the histogram distributions against each other using the Kullback-Leibler-Divergence. 
""" # settings for LBP radius = 2 n_points = 8 * radius def kullback_leibler_divergence(p, q): p = np.asarray(p) q = np.asarray(q) filt = np.logical_and(p != 0, q != 0) return np.sum(p[filt] * np.log2(p[filt] / q[filt])) def match(refs, img): best_score = 10 best_name = None lbp = local_binary_pattern(img, n_points, radius, METHOD) n_bins = lbp.max() + 1 hist, _ = np.histogram(lbp, normed=True, bins=n_bins, range=(0, n_bins)) for name, ref in refs.items(): ref_hist, _ = np.histogram(ref, normed=True, bins=n_bins, range=(0, n_bins)) score = kullback_leibler_divergence(hist, ref_hist) if score < best_score: best_score = score best_name = name return best_name brick = data.load('brick.png') grass = data.load('grass.png') wall = data.load('rough-wall.png') refs = { 'brick': local_binary_pattern(brick, n_points, radius, METHOD), 'grass': local_binary_pattern(grass, n_points, radius, METHOD), 'wall': local_binary_pattern(wall, n_points, radius, METHOD) } # classify rotated textures print('Rotated images matched against references using LBP:') print('original: brick, rotated: 30deg, match result: ', match(refs, rotate(brick, angle=30, resize=False))) print('original: brick, rotated: 70deg, match result: ', match(refs, rotate(brick, angle=70, resize=False))) print('original: grass, rotated: 145deg, match result: ', match(refs, rotate(grass, angle=145, resize=False))) # plot histograms of LBP of textures fig, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(nrows=2, ncols=3, figsize=(9, 6)) plt.gray() ax1.imshow(brick) ax1.axis('off') hist(ax4, refs['brick']) ax4.set_ylabel('Percentage') ax2.imshow(grass) ax2.axis('off') hist(ax5, refs['grass']) ax5.set_xlabel('Uniform LBP values') ax3.imshow(wall) ax3.axis('off') hist(ax6, refs['wall']) """ .. image:: PLOT2RST.current_figure """ plt.show()
bsd-3-clause
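The texture-matching example above was written for an older scikit-image API (`data.load('brick.png')`, `normed=` histograms). A compact sketch of the same LBP-histogram step with current names, assuming a scikit-image release that provides `data.brick()`:

import numpy as np
from skimage import data
from skimage.feature import local_binary_pattern

radius = 2
n_points = 8 * radius

lbp = local_binary_pattern(data.brick(), n_points, radius, 'uniform')
n_bins = int(lbp.max() + 1)                      # n_points + 2 bins for 'uniform'
hist, _ = np.histogram(lbp, bins=n_bins, range=(0, n_bins), density=True)
print(hist.round(3))                             # normalised LBP distribution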
kernc/scikit-learn
benchmarks/bench_multilabel_metrics.py
276
7138
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': partial(f1_score, average='micro'), 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. """ metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in enumerate(formats): ax.plot(x_ticks, results[i, 
j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: {}'.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
bsd-3-clause
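A minimal sketch of the multilabel format conversions that the FORMATS dict in the benchmark above performs on a dense indicator matrix; the toy matrix is made up for illustration.

import numpy as np
import scipy.sparse as sp

y = np.array([[1, 0, 1, 0],
              [0, 1, 0, 0],
              [1, 1, 0, 1]])

# 'sequences': one list of active label indices per sample
as_sequences = [list(np.flatnonzero(row)) for row in y]   # [[0, 2], [1], [0, 1, 3]]
# 'csr' / 'csc': sparse indicator matrices in row- and column-major layout
as_csr = sp.csr_matrix(y)
as_csc = sp.csc_matrix(y)

print(as_sequences)
print(as_csr.nnz, as_csc.nnz)   # both 6: the number of positive labels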
hasecbinusr/pysal
pysal/core/tests/test_Tables.py
6
1083
import pysal as ps
import numpy as np
import unittest as ut

PANDAS_EXTINCT = ps.common.pandas is None


class Test_Table(ut.TestCase):
    def setUp(self):
        self.filehandler = ps.open(ps.examples.get_path('columbus.dbf'))
        self.df = self.filehandler.to_df()
        self.filehandler.seek(0)
        self.shapefile = ps.open(ps.examples.get_path('columbus.shp'))
        self.csvhandler = ps.open(ps.examples.get_path('usjoin.csv'))
        self.csv_df = self.csvhandler.to_df()
        self.csvhandler.seek(0)

    @ut.skipIf(PANDAS_EXTINCT, 'missing pandas')
    def test_to_df(self):
        for column in self.csv_df.columns:
            if column.lower() == 'name':
                continue
            np.testing.assert_allclose(self.csvhandler.by_col(column),
                                       self.csv_df[column].values)
        for column in self.df.columns:
            if column == 'geometry':
                continue
            np.testing.assert_allclose(self.filehandler.by_col(column),
                                       self.df[column])
bsd-3-clause
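A minimal usage sketch of the PySAL table API exercised by the test above; the 'HOVAL' column name is assumed from the standard columbus attribute table.

import numpy as np
import pysal as ps

handle = ps.open(ps.examples.get_path('columbus.dbf'))
df = handle.to_df()
handle.seek(0)   # rewind so by_col reads from the first record again

# compare one attribute column with its DataFrame counterpart
np.testing.assert_allclose(handle.by_col('HOVAL'), df['HOVAL'])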
Acehaidrey/incubator-airflow
tests/providers/sqlite/hooks/test_sqlite.py
7
3828
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import unittest from unittest import mock from unittest.mock import patch from airflow.models import Connection from airflow.providers.sqlite.hooks.sqlite import SqliteHook class TestSqliteHookConn(unittest.TestCase): def setUp(self): self.connection = Connection(host='host') class UnitTestSqliteHook(SqliteHook): conn_name_attr = 'test_conn_id' self.db_hook = UnitTestSqliteHook() self.db_hook.get_connection = mock.Mock() self.db_hook.get_connection.return_value = self.connection @patch('airflow.providers.sqlite.hooks.sqlite.sqlite3.connect') def test_get_conn(self, mock_connect): self.db_hook.get_conn() mock_connect.assert_called_once_with('host') @patch('airflow.providers.sqlite.hooks.sqlite.sqlite3.connect') def test_get_conn_non_default_id(self, mock_connect): self.db_hook.test_conn_id = 'non_default' # pylint: disable=attribute-defined-outside-init self.db_hook.get_conn() mock_connect.assert_called_once_with('host') self.db_hook.get_connection.assert_called_once_with('non_default') class TestSqliteHook(unittest.TestCase): def setUp(self): self.cur = mock.MagicMock() self.conn = mock.MagicMock() self.conn.cursor.return_value = self.cur conn = self.conn class UnitTestSqliteHook(SqliteHook): conn_name_attr = 'test_conn_id' log = mock.MagicMock() def get_conn(self): return conn self.db_hook = UnitTestSqliteHook() def test_get_first_record(self): statement = 'SQL' result_sets = [('row1',), ('row2',)] self.cur.fetchone.return_value = result_sets[0] self.assertEqual(result_sets[0], self.db_hook.get_first(statement)) self.conn.close.assert_called_once_with() self.cur.close.assert_called_once_with() self.cur.execute.assert_called_once_with(statement) def test_get_records(self): statement = 'SQL' result_sets = [('row1',), ('row2',)] self.cur.fetchall.return_value = result_sets self.assertEqual(result_sets, self.db_hook.get_records(statement)) self.conn.close.assert_called_once_with() self.cur.close.assert_called_once_with() self.cur.execute.assert_called_once_with(statement) def test_get_pandas_df(self): statement = 'SQL' column = 'col' result_sets = [('row1',), ('row2',)] self.cur.description = [(column,)] self.cur.fetchall.return_value = result_sets df = self.db_hook.get_pandas_df(statement) self.assertEqual(column, df.columns[0]) self.assertEqual(result_sets[0][0], df.values.tolist()[0][0]) self.assertEqual(result_sets[1][0], df.values.tolist()[1][0]) self.cur.execute.assert_called_once_with(statement) def test_run_log(self): statement = 'SQL' self.db_hook.run(statement) assert self.db_hook.log.info.call_count == 2
apache-2.0
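A hedged sketch of calling the SqliteHook methods that the tests above mock out; the connection id and the SQL statement are placeholders, and a matching Airflow connection is assumed to exist.

from airflow.providers.sqlite.hooks.sqlite import SqliteHook

hook = SqliteHook(sqlite_conn_id='sqlite_default')   # placeholder connection id
rows = hook.get_records('SELECT 1')     # list of result tuples
first = hook.get_first('SELECT 1')      # first row only, or None
df = hook.get_pandas_df('SELECT 1')     # results as a pandas DataFrame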
bromjiri/Presto
trainer/tests/final-twtter.py
1
1993
import datetime
import trainer.corpora as crp
import trainer.features as ftr
import trainer.classifier_test_verbose as cls
import os

# vars
type = "final"
nltk_run = False
sklearn_run = True
COUNT = 25000
cut = int((COUNT / 2) * 4 / 5)
array = [15000]


def run(dataset):
    nlt = dict()
    skl = dict()

    dir = "output/" + dataset + "/" + type + "/"
    os.makedirs(dir, exist_ok=True)

    # file
    for variable in array:
        var_name = str(variable)

        if nltk_run:
            nlt_file = dir + dataset + "-" + type + "-" + var_name + "-nlt.csv"
            nlt[var_name] = open(nlt_file, 'a')
            nlt[var_name].write(str(datetime.datetime.today()) + "\n")

        if sklearn_run:
            skl_file = dir + dataset + "-" + type + "-" + var_name + "-skl.csv"
            skl[var_name] = open(skl_file, 'a')
            skl[var_name].write(str(datetime.datetime.today()) + "\n")

    # cycle
    for x in range(0, 10):
        print(x)
        corpora = crp.Corpora(dataset, count=COUNT, shuffle=True)

        for variable in array:
            print(str(variable))
            var_name = str(variable)

            features = ftr.Features(corpora, total=COUNT, bigram=True, stop=False,
                                    stem="porter", lower=True, inf_count=variable)
            posfeats = features.get_features_pos()
            negfeats = features.get_fearures_neg()

            trainfeats = negfeats[:cut] + posfeats[:cut]
            testfeats = negfeats[cut:] + posfeats[cut:]

            nlt_output, skl_output = cls.train(trainfeats, testfeats, dataset,
                                               nlt=nltk_run, skl=sklearn_run)

            # if nltk_run:
            #     print(str(nlt_output))
            #     nlt[var_name].write(nlt_output)
            #     nlt[var_name].flush()

            # if sklearn_run:
            #     print(str(skl_output))
            #     skl[var_name].write(skl_output)
            #     skl[var_name].flush()


dataset_array = ["stanford"]
for dataset in dataset_array:
    run(dataset)
mit
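The train/test split arithmetic used in the script above, worked through for its default COUNT (assuming each class contributes COUNT / 2 feature sets):

COUNT = 25000
cut = int((COUNT / 2) * 4 / 5)   # 12500 feature sets per class, 4/5 of them for training
assert cut == 10000
# trainfeats = negfeats[:10000] + posfeats[:10000]  -> 20000 items
# testfeats  = negfeats[10000:] + posfeats[10000:]  ->  5000 items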
linebp/pandas
pandas/tests/plotting/test_series.py
1
32501
# coding: utf-8 """ Test cases for Series.plot """ import itertools import pytest from datetime import datetime import pandas as pd from pandas import Series, DataFrame, date_range from pandas.compat import range, lrange import pandas.util.testing as tm from pandas.util.testing import slow import numpy as np from numpy.random import randn import pandas.plotting as plotting from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works, _skip_if_no_scipy_gaussian_kde, _ok_for_gaussian_kde) tm._skip_if_no_mpl() class TestSeriesPlots(TestPlotBase): def setup_method(self, method): TestPlotBase.setup_method(self, method) import matplotlib as mpl mpl.rcdefaults() self.ts = tm.makeTimeSeries() self.ts.name = 'ts' self.series = tm.makeStringSeries() self.series.name = 'series' self.iseries = tm.makePeriodSeries() self.iseries.name = 'iseries' @slow def test_plot(self): _check_plot_works(self.ts.plot, label='foo') _check_plot_works(self.ts.plot, use_index=False) axes = _check_plot_works(self.ts.plot, rot=0) self._check_ticks_props(axes, xrot=0) ax = _check_plot_works(self.ts.plot, style='.', logy=True) self._check_ax_scales(ax, yaxis='log') ax = _check_plot_works(self.ts.plot, style='.', logx=True) self._check_ax_scales(ax, xaxis='log') ax = _check_plot_works(self.ts.plot, style='.', loglog=True) self._check_ax_scales(ax, xaxis='log', yaxis='log') _check_plot_works(self.ts[:10].plot.bar) _check_plot_works(self.ts.plot.area, stacked=False) _check_plot_works(self.iseries.plot) for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(self.series[:5].plot, kind=kind) _check_plot_works(self.series[:10].plot.barh) ax = _check_plot_works(Series(randn(10)).plot.bar, color='black') self._check_colors([ax.patches[0]], facecolors=['black']) # GH 6951 ax = _check_plot_works(self.ts.plot, subplots=True) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1)) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1)) self._check_axes_shape(ax, axes_num=1, layout=(1, 1)) @slow def test_plot_figsize_and_title(self): # figsize and title _, ax = self.plt.subplots() ax = self.series.plot(title='Test', figsize=(16, 8), ax=ax) self._check_text_labels(ax.title, 'Test') self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) def test_dont_modify_rcParams(self): # GH 8242 if self.mpl_ge_1_5_0: key = 'axes.prop_cycle' else: key = 'axes.color_cycle' colors = self.plt.rcParams[key] _, ax = self.plt.subplots() Series([1, 2, 3]).plot(ax=ax) assert colors == self.plt.rcParams[key] def test_ts_line_lim(self): fig, ax = self.plt.subplots() ax = self.ts.plot(ax=ax) xmin, xmax = ax.get_xlim() lines = ax.get_lines() assert xmin == lines[0].get_data(orig=False)[0][0] assert xmax == lines[0].get_data(orig=False)[0][-1] tm.close() ax = self.ts.plot(secondary_y=True, ax=ax) xmin, xmax = ax.get_xlim() lines = ax.get_lines() assert xmin == lines[0].get_data(orig=False)[0][0] assert xmax == lines[0].get_data(orig=False)[0][-1] def test_ts_area_lim(self): _, ax = self.plt.subplots() ax = self.ts.plot.area(stacked=False, ax=ax) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] assert xmin == line[0] assert xmax == line[-1] tm.close() # GH 7471 _, ax = self.plt.subplots() ax = self.ts.plot.area(stacked=False, x_compat=True, ax=ax) xmin, xmax = ax.get_xlim() line = 
ax.get_lines()[0].get_data(orig=False)[0] assert xmin == line[0] assert xmax == line[-1] tm.close() tz_ts = self.ts.copy() tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET') _, ax = self.plt.subplots() ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] assert xmin == line[0] assert xmax == line[-1] tm.close() _, ax = self.plt.subplots() ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax) xmin, xmax = ax.get_xlim() line = ax.get_lines()[0].get_data(orig=False)[0] assert xmin == line[0] assert xmax == line[-1] def test_label(self): s = Series([1, 2]) _, ax = self.plt.subplots() ax = s.plot(label='LABEL', legend=True, ax=ax) self._check_legend_labels(ax, labels=['LABEL']) self.plt.close() _, ax = self.plt.subplots() ax = s.plot(legend=True, ax=ax) self._check_legend_labels(ax, labels=['None']) self.plt.close() # get name from index s.name = 'NAME' _, ax = self.plt.subplots() ax = s.plot(legend=True, ax=ax) self._check_legend_labels(ax, labels=['NAME']) self.plt.close() # override the default _, ax = self.plt.subplots() ax = s.plot(legend=True, label='LABEL', ax=ax) self._check_legend_labels(ax, labels=['LABEL']) self.plt.close() # Add lebel info, but don't draw _, ax = self.plt.subplots() ax = s.plot(legend=False, label='LABEL', ax=ax) assert ax.get_legend() is None # Hasn't been drawn ax.legend() # draw it self._check_legend_labels(ax, labels=['LABEL']) def test_line_area_nan_series(self): values = [1, 2, np.nan, 3] s = Series(values) ts = Series(values, index=tm.makeDateIndex(k=4)) for d in [s, ts]: ax = _check_plot_works(d.plot) masked = ax.lines[0].get_ydata() # remove nan for comparison purpose exp = np.array([1, 2, 3], dtype=np.float64) tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp) tm.assert_numpy_array_equal( masked.mask, np.array([False, False, True, False])) expected = np.array([1, 2, 0, 3], dtype=np.float64) ax = _check_plot_works(d.plot, stacked=True) tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) ax = _check_plot_works(d.plot.area) tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) ax = _check_plot_works(d.plot.area, stacked=False) tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) def test_line_use_index_false(self): s = Series([1, 2, 3], index=['a', 'b', 'c']) s.index.name = 'The Index' _, ax = self.plt.subplots() ax = s.plot(use_index=False, ax=ax) label = ax.get_xlabel() assert label == '' _, ax = self.plt.subplots() ax2 = s.plot.bar(use_index=False, ax=ax) label2 = ax2.get_xlabel() assert label2 == '' @slow def test_bar_log(self): expected = np.array([1., 10., 100., 1000.]) if not self.mpl_le_1_2_1: expected = np.hstack((.1, expected, 1e4)) _, ax = self.plt.subplots() ax = Series([200, 500]).plot.bar(log=True, ax=ax) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) tm.close() _, ax = self.plt.subplots() ax = Series([200, 500]).plot.barh(log=True, ax=ax) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) tm.close() # GH 9905 expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00]) if not self.mpl_le_1_2_1: expected = np.hstack((1.0e-04, expected, 1.0e+01)) if self.mpl_ge_2_0_0: expected = np.hstack((1.0e-05, expected)) _, ax = self.plt.subplots() ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar', ax=ax) ymin = 0.0007943282347242822 if self.mpl_ge_2_0_0 else 0.001 ymax = 0.12589254117941673 if self.mpl_ge_2_0_0 else .10000000000000001 res = ax.get_ylim() tm.assert_almost_equal(res[0], ymin) 
tm.assert_almost_equal(res[1], ymax) tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) tm.close() _, ax = self.plt.subplots() ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh', ax=ax) res = ax.get_xlim() tm.assert_almost_equal(res[0], ymin) tm.assert_almost_equal(res[1], ymax) tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected) @slow def test_bar_ignore_index(self): df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) _, ax = self.plt.subplots() ax = df.plot.bar(use_index=False, ax=ax) self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3']) def test_rotation(self): df = DataFrame(randn(5, 5)) # Default rot 0 _, ax = self.plt.subplots() axes = df.plot(ax=ax) self._check_ticks_props(axes, xrot=0) _, ax = self.plt.subplots() axes = df.plot(rot=30, ax=ax) self._check_ticks_props(axes, xrot=30) def test_irregular_datetime(self): rng = date_range('1/1/2000', '3/1/2000') rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] ser = Series(randn(len(rng)), rng) _, ax = self.plt.subplots() ax = ser.plot(ax=ax) xp = datetime(1999, 1, 1).toordinal() ax.set_xlim('1/1/1999', '1/1/2001') assert xp == ax.get_xlim()[0] @slow def test_pie_series(self): # if sum of values is less than 1.0, pie handle them as rate and draw # semicircle. series = Series(np.random.randint(1, 5), index=['a', 'b', 'c', 'd', 'e'], name='YLABEL') ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, series.index) assert ax.get_ylabel() == 'YLABEL' # without wedge labels ax = _check_plot_works(series.plot.pie, labels=None) self._check_text_labels(ax.texts, [''] * 5) # with less colors than elements color_args = ['r', 'g', 'b'] ax = _check_plot_works(series.plot.pie, colors=color_args) color_expected = ['r', 'g', 'b', 'r', 'g'] self._check_colors(ax.patches, facecolors=color_expected) # with labels and colors labels = ['A', 'B', 'C', 'D', 'E'] color_args = ['r', 'g', 'b', 'c', 'm'] ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) self._check_text_labels(ax.texts, labels) self._check_colors(ax.patches, facecolors=color_args) # with autopct and fontsize ax = _check_plot_works(series.plot.pie, colors=color_args, autopct='%.2f', fontsize=7) pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())] iters = [iter(series.index), iter(pcts)] expected_texts = list(next(it) for it in itertools.cycle(iters)) self._check_text_labels(ax.texts, expected_texts) for t in ax.texts: assert t.get_fontsize() == 7 # includes negative value with pytest.raises(ValueError): series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e']) series.plot.pie() # includes nan series = Series([1, 2, np.nan, 4], index=['a', 'b', 'c', 'd'], name='YLABEL') ax = _check_plot_works(series.plot.pie) self._check_text_labels(ax.texts, ['a', 'b', '', 'd']) def test_pie_nan(self): s = Series([1, np.nan, 1, 1]) _, ax = self.plt.subplots() ax = s.plot.pie(legend=True, ax=ax) expected = ['0', '', '2', '3'] result = [x.get_text() for x in ax.texts] assert result == expected @slow def test_hist_df_kwargs(self): df = DataFrame(np.random.randn(10, 2)) _, ax = self.plt.subplots() ax = df.plot.hist(bins=5, ax=ax) assert len(ax.patches) == 10 @slow def test_hist_df_with_nonnumerics(self): # GH 9853 with tm.RNGContext(1): df = DataFrame( np.random.randn(10, 4), columns=['A', 'B', 'C', 'D']) df['E'] = ['x', 'y'] * 5 _, ax = self.plt.subplots() ax = df.plot.hist(bins=5, ax=ax) assert len(ax.patches) == 20 _, ax = self.plt.subplots() ax = df.plot.hist(ax=ax) # bins=10 assert 
len(ax.patches) == 40 @slow def test_hist_legacy(self): _check_plot_works(self.ts.hist) _check_plot_works(self.ts.hist, grid=False) _check_plot_works(self.ts.hist, figsize=(8, 10)) # _check_plot_works adds an ax so catch warning. see GH #13188 with tm.assert_produces_warning(UserWarning): _check_plot_works(self.ts.hist, by=self.ts.index.month) with tm.assert_produces_warning(UserWarning): _check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5) fig, ax = self.plt.subplots(1, 1) _check_plot_works(self.ts.hist, ax=ax) _check_plot_works(self.ts.hist, ax=ax, figure=fig) _check_plot_works(self.ts.hist, figure=fig) tm.close() fig, (ax1, ax2) = self.plt.subplots(1, 2) _check_plot_works(self.ts.hist, figure=fig, ax=ax1) _check_plot_works(self.ts.hist, figure=fig, ax=ax2) with pytest.raises(ValueError): self.ts.hist(by=self.ts.index, figure=fig) @slow def test_hist_bins_legacy(self): df = DataFrame(np.random.randn(10, 2)) ax = df.hist(bins=2)[0][0] assert len(ax.patches) == 2 @slow def test_hist_layout(self): df = self.hist_df with pytest.raises(ValueError): df.height.hist(layout=(1, 1)) with pytest.raises(ValueError): df.height.hist(layout=[1, 1]) @slow def test_hist_layout_with_by(self): df = self.hist_df # _check_plot_works adds an ax so catch warning. see GH #13188 with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) self._check_axes_shape(axes, axes_num=2, layout=(2, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.gender, layout=(3, -1)) self._check_axes_shape(axes, axes_num=2, layout=(3, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(4, 1)) self._check_axes_shape(axes, axes_num=4, layout=(4, 1)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(2, -1)) self._check_axes_shape(axes, axes_num=4, layout=(2, 2)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(3, -1)) self._check_axes_shape(axes, axes_num=4, layout=(3, 2)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.category, layout=(-1, 4)) self._check_axes_shape(axes, axes_num=4, layout=(1, 4)) with tm.assert_produces_warning(UserWarning): axes = _check_plot_works(df.height.hist, by=df.classroom, layout=(2, 2)) self._check_axes_shape(axes, axes_num=3, layout=(2, 2)) axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) @slow def test_hist_no_overlap(self): from matplotlib.pyplot import subplot, gcf x = Series(randn(2)) y = Series(randn(2)) subplot(121) x.hist() subplot(122) y.hist() fig = gcf() axes = fig.axes if self.mpl_ge_1_5_0 else fig.get_axes() assert len(axes) == 2 @slow def test_hist_secondary_legend(self): # GH 9610 df = DataFrame(np.random.randn(30, 4), columns=list('abcd')) # primary -> secondary _, ax = self.plt.subplots() ax = df['a'].plot.hist(legend=True, ax=ax) df['b'].plot.hist(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b (right)']) assert ax.get_yaxis().get_visible() assert ax.right_ax.get_yaxis().get_visible() tm.close() # secondary -> secondary _, ax = self.plt.subplots() ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax) df['b'].plot.hist(ax=ax, legend=True, 
secondary_y=True) # both legends are draw on left ax # left axis must be invisible, right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)']) assert not ax.left_ax.get_yaxis().get_visible() assert ax.get_yaxis().get_visible() tm.close() # secondary -> primary _, ax = self.plt.subplots() ax = df['a'].plot.hist(legend=True, secondary_y=True, ax=ax) # right axes is returned df['b'].plot.hist(ax=ax, legend=True) # both legends are draw on left ax # left and right axis must be visible self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b']) assert ax.left_ax.get_yaxis().get_visible() assert ax.get_yaxis().get_visible() tm.close() @slow def test_df_series_secondary_legend(self): # GH 9779 df = DataFrame(np.random.randn(30, 3), columns=list('abc')) s = Series(np.random.randn(30), name='x') # primary -> secondary (without passing ax) _, ax = self.plt.subplots() ax = df.plot(ax=ax) s.plot(legend=True, secondary_y=True, ax=ax) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)']) assert ax.get_yaxis().get_visible() assert ax.right_ax.get_yaxis().get_visible() tm.close() # primary -> secondary (with passing ax) _, ax = self.plt.subplots() ax = df.plot(ax=ax) s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left and right axis must be visible self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)']) assert ax.get_yaxis().get_visible() assert ax.right_ax.get_yaxis().get_visible() tm.close() # seconcary -> secondary (without passing ax) _, ax = self.plt.subplots() ax = df.plot(secondary_y=True, ax=ax) s.plot(legend=True, secondary_y=True, ax=ax) # both legends are dran on left ax # left axis must be invisible and right axis must be visible expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)'] self._check_legend_labels(ax.left_ax, labels=expected) assert not ax.left_ax.get_yaxis().get_visible() assert ax.get_yaxis().get_visible() tm.close() # secondary -> secondary (with passing ax) _, ax = self.plt.subplots() ax = df.plot(secondary_y=True, ax=ax) s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left axis must be invisible and right axis must be visible expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)'] self._check_legend_labels(ax.left_ax, expected) assert not ax.left_ax.get_yaxis().get_visible() assert ax.get_yaxis().get_visible() tm.close() # secondary -> secondary (with passing ax) _, ax = self.plt.subplots() ax = df.plot(secondary_y=True, mark_right=False, ax=ax) s.plot(ax=ax, legend=True, secondary_y=True) # both legends are dran on left ax # left axis must be invisible and right axis must be visible expected = ['a', 'b', 'c', 'x (right)'] self._check_legend_labels(ax.left_ax, expected) assert not ax.left_ax.get_yaxis().get_visible() assert ax.get_yaxis().get_visible() tm.close() @slow def test_plot_fails_with_dupe_color_and_style(self): x = Series(randn(2)) with pytest.raises(ValueError): _, ax = self.plt.subplots() x.plot(style='k--', color='k', ax=ax) @slow def test_hist_kde(self): _, ax = self.plt.subplots() ax = self.ts.plot.hist(logy=True, ax=ax) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() # ticks are values, thus ticklabels are blank self._check_text_labels(xlabels, [''] * len(xlabels)) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() 
_check_plot_works(self.ts.plot.kde) _check_plot_works(self.ts.plot.density) _, ax = self.plt.subplots() ax = self.ts.plot.kde(logy=True, ax=ax) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, [''] * len(xlabels)) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) @slow def test_kde_kwargs(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() from numpy import linspace _check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100, 100, 20)) _check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100, 100, 20)) _, ax = self.plt.subplots() ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100, 100, 20), ax=ax) self._check_ax_scales(ax, yaxis='log') self._check_text_labels(ax.yaxis.get_label(), 'Density') @slow def test_kde_missing_vals(self): tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() s = Series(np.random.uniform(size=50)) s[0] = np.nan axes = _check_plot_works(s.plot.kde) # gh-14821: check if the values have any missing values assert any(~np.isnan(axes.lines[0].get_xdata())) @slow def test_hist_kwargs(self): _, ax = self.plt.subplots() ax = self.ts.plot.hist(bins=5, ax=ax) assert len(ax.patches) == 5 self._check_text_labels(ax.yaxis.get_label(), 'Frequency') tm.close() if self.mpl_ge_1_3_1: _, ax = self.plt.subplots() ax = self.ts.plot.hist(orientation='horizontal', ax=ax) self._check_text_labels(ax.xaxis.get_label(), 'Frequency') tm.close() _, ax = self.plt.subplots() ax = self.ts.plot.hist(align='left', stacked=True, ax=ax) tm.close() @slow def test_hist_kde_color(self): _, ax = self.plt.subplots() ax = self.ts.plot.hist(logy=True, bins=10, color='b', ax=ax) self._check_ax_scales(ax, yaxis='log') assert len(ax.patches) == 10 self._check_colors(ax.patches, facecolors=['b'] * 10) tm._skip_if_no_scipy() _skip_if_no_scipy_gaussian_kde() _, ax = self.plt.subplots() ax = self.ts.plot.kde(logy=True, color='r', ax=ax) self._check_ax_scales(ax, yaxis='log') lines = ax.get_lines() assert len(lines) == 1 self._check_colors(lines, ['r']) @slow def test_boxplot_series(self): _, ax = self.plt.subplots() ax = self.ts.plot.box(logy=True, ax=ax) self._check_ax_scales(ax, yaxis='log') xlabels = ax.get_xticklabels() self._check_text_labels(xlabels, [self.ts.name]) ylabels = ax.get_yticklabels() self._check_text_labels(ylabels, [''] * len(ylabels)) @slow def test_kind_both_ways(self): s = Series(range(3)) kinds = (plotting._core._common_kinds + plotting._core._series_kinds) _, ax = self.plt.subplots() for kind in kinds: if not _ok_for_gaussian_kde(kind): continue s.plot(kind=kind, ax=ax) getattr(s.plot, kind)() @slow def test_invalid_plot_data(self): s = Series(list('abcd')) _, ax = self.plt.subplots() for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with pytest.raises(TypeError): s.plot(kind=kind, ax=ax) @slow def test_valid_object_plot(self): s = Series(lrange(10), dtype=object) for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue _check_plot_works(s.plot, kind=kind) def test_partially_invalid_plot_data(self): s = Series(['a', 'b', 1.0, 2]) _, ax = self.plt.subplots() for kind in plotting._core._common_kinds: if not _ok_for_gaussian_kde(kind): continue with pytest.raises(TypeError): s.plot(kind=kind, ax=ax) def test_invalid_kind(self): s = Series([1, 2]) with pytest.raises(ValueError): s.plot(kind='aasdf') @slow def test_dup_datetime_index_plot(self): dr1 = date_range('1/1/2009', periods=4) dr2 = 
date_range('1/2/2009', periods=4) index = dr1.append(dr2) values = randn(index.size) s = Series(values, index=index) _check_plot_works(s.plot) @slow def test_errorbar_plot(self): s = Series(np.arange(10), name='x') s_err = np.random.randn(10) d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y']) # test line and bar plots kinds = ['line', 'bar'] for kind in kinds: ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err, kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, yerr=d_err, kind=kind) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind) self._check_has_errorbars(ax, xerr=1, yerr=1) ax = _check_plot_works(s.plot, xerr=s_err) self._check_has_errorbars(ax, xerr=1, yerr=0) # test time series plotting ix = date_range('1/1/2000', '1/1/2001', freq='M') ts = Series(np.arange(12), index=ix, name='x') ts_err = Series(np.random.randn(12), index=ix) td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y']) ax = _check_plot_works(ts.plot, yerr=ts_err) self._check_has_errorbars(ax, xerr=0, yerr=1) ax = _check_plot_works(ts.plot, yerr=td_err) self._check_has_errorbars(ax, xerr=0, yerr=1) # check incorrect lengths and types with pytest.raises(ValueError): s.plot(yerr=np.arange(11)) s_err = ['zzz'] * 10 # in mpl 1.5+ this is a TypeError with pytest.raises((ValueError, TypeError)): s.plot(yerr=s_err) def test_table(self): _check_plot_works(self.series.plot, table=True) _check_plot_works(self.series.plot, table=self.series) @slow def test_series_grid_settings(self): # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 self._check_grid_settings(Series([1, 2, 3]), plotting._core._series_kinds + plotting._core._common_kinds) @slow def test_standard_colors(self): from pandas.plotting._style import _get_standard_colors for c in ['r', 'red', 'green', '#FF0000']: result = _get_standard_colors(1, color=c) assert result == [c] result = _get_standard_colors(1, color=[c]) assert result == [c] result = _get_standard_colors(3, color=c) assert result == [c] * 3 result = _get_standard_colors(3, color=[c]) assert result == [c] * 3 @slow def test_standard_colors_all(self): import matplotlib.colors as colors from pandas.plotting._style import _get_standard_colors # multiple colors like mediumaquamarine for c in colors.cnames: result = _get_standard_colors(num_colors=1, color=c) assert result == [c] result = _get_standard_colors(num_colors=1, color=[c]) assert result == [c] result = _get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 result = _get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 # single letter colors like k for c in colors.ColorConverter.colors: result = _get_standard_colors(num_colors=1, color=c) assert result == [c] result = _get_standard_colors(num_colors=1, color=[c]) assert result == [c] result = _get_standard_colors(num_colors=3, color=c) assert result == [c] * 3 result = _get_standard_colors(num_colors=3, color=[c]) assert result == [c] * 3 def test_series_plot_color_kwargs(self): # GH1890 _, ax = self.plt.subplots() ax = Series(np.arange(12) + 1).plot(color='green', ax=ax) self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_kwargs(self): # #1890 _, ax = self.plt.subplots() ax = Series(np.arange(12) + 1, 
index=date_range( '1/1/2000', periods=12)).plot(color='green', ax=ax) self._check_colors(ax.get_lines(), linecolors=['green']) def test_time_series_plot_color_with_empty_kwargs(self): import matplotlib as mpl if self.mpl_ge_1_5_0: def_colors = self._maybe_unpack_cycler(mpl.rcParams) else: def_colors = mpl.rcParams['axes.color_cycle'] index = date_range('1/1/2000', periods=12) s = Series(np.arange(1, 13), index=index) ncolors = 3 _, ax = self.plt.subplots() for i in range(ncolors): ax = s.plot(ax=ax) self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors]) def test_xticklabels(self): # GH11529 s = Series(np.arange(10), index=['P%02d' % i for i in range(10)]) _, ax = self.plt.subplots() ax = s.plot(xticks=[0, 3, 5, 9], ax=ax) exp = ['P%02d' % i for i in [0, 3, 5, 9]] self._check_text_labels(ax.get_xticklabels(), exp) def test_custom_business_day_freq(self): # GH7222 from pandas.tseries.offsets import CustomBusinessDay s = Series(range(100, 121), index=pd.bdate_range( start='2014-05-01', end='2014-06-01', freq=CustomBusinessDay(holidays=['2014-05-26']))) _check_plot_works(s.plot)
bsd-3-clause
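A minimal sketch of the Series.plot options exercised by the tests above (legend labels, line styles, secondary axes); the data are made up for illustration.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

s = pd.Series(np.random.randn(30).cumsum(),
              index=pd.date_range('2000-01-01', periods=30),
              name='walk')

fig, ax = plt.subplots()
s.plot(ax=ax, legend=True, label='LABEL', style='-')            # line plot, custom legend label
s.abs().plot(ax=ax, legend=True, secondary_y=True, style='--')  # second curve on a right-hand axis
plt.show()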
terkkila/scikit-learn
sklearn/metrics/setup.py
299
1024
import os
import os.path

import numpy
from numpy.distutils.misc_util import Configuration

from sklearn._build_utils import get_blas_info


def configuration(parent_package="", top_path=None):
    config = Configuration("metrics", parent_package, top_path)
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    setup(**configuration().todict())
bsd-3-clause
kevalds51/sympy
sympy/interactive/tests/test_ipythonprinting.py
21
6055
"""Tests that the IPython printing module is properly loaded. """ from sympy.core.compatibility import u from sympy.interactive.session import init_ipython_session from sympy.external import import_module from sympy.utilities.pytest import raises # run_cell was added in IPython 0.11 ipython = import_module("IPython", min_module_version="0.11") # disable tests if ipython is not present if not ipython: disabled = True def test_ipythonprinting(): # Initialize and setup IPython session app = init_ipython_session() app.run_cell("ip = get_ipython()") app.run_cell("inst = ip.instance()") app.run_cell("format = inst.display_formatter.format") app.run_cell("from sympy import Symbol") # Printing without printing extension app.run_cell("a = format(Symbol('pi'))") app.run_cell("a2 = format(Symbol('pi')**2)") # Deal with API change starting at IPython 1.0 if int(ipython.__version__.split(".")[0]) < 1: assert app.user_ns['a']['text/plain'] == "pi" assert app.user_ns['a2']['text/plain'] == "pi**2" else: assert app.user_ns['a'][0]['text/plain'] == "pi" assert app.user_ns['a2'][0]['text/plain'] == "pi**2" # Load printing extension app.run_cell("from sympy import init_printing") app.run_cell("init_printing()") # Printing with printing extension app.run_cell("a = format(Symbol('pi'))") app.run_cell("a2 = format(Symbol('pi')**2)") # Deal with API change starting at IPython 1.0 if int(ipython.__version__.split(".")[0]) < 1: assert app.user_ns['a']['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi') assert app.user_ns['a2']['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ') else: assert app.user_ns['a'][0]['text/plain'] in (u('\N{GREEK SMALL LETTER PI}'), 'pi') assert app.user_ns['a2'][0]['text/plain'] in (u(' 2\n\N{GREEK SMALL LETTER PI} '), ' 2\npi ') def test_print_builtin_option(): # Initialize and setup IPython session app = init_ipython_session() app.run_cell("ip = get_ipython()") app.run_cell("inst = ip.instance()") app.run_cell("format = inst.display_formatter.format") app.run_cell("from sympy import Symbol") app.run_cell("from sympy import init_printing") app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})") # Deal with API change starting at IPython 1.0 if int(ipython.__version__.split(".")[0]) < 1: text = app.user_ns['a']['text/plain'] raises(KeyError, lambda: app.user_ns['a']['text/latex']) else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) # Note : In Python 3 the text is unicode, but in 2 it is a string. # XXX: How can we make this ignore the terminal width? This test fails if # the terminal is too narrow. 
assert text in ("{pi: 3.14, n_i: 3}", u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'), "{n_i: 3, pi: 3.14}", u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')) # If we enable the default printing, then the dictionary's should render # as a LaTeX version of the whole dict: ${\pi: 3.14, n_i: 3}$ app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True") app.run_cell("init_printing(use_latex=True)") app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})") # Deal with API change starting at IPython 1.0 if int(ipython.__version__.split(".")[0]) < 1: text = app.user_ns['a']['text/plain'] latex = app.user_ns['a']['text/latex'] else: text = app.user_ns['a'][0]['text/plain'] latex = app.user_ns['a'][0]['text/latex'] assert text in ("{pi: 3.14, n_i: 3}", u('{n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3, \N{GREEK SMALL LETTER PI}: 3.14}'), "{n_i: 3, pi: 3.14}", u('{\N{GREEK SMALL LETTER PI}: 3.14, n\N{LATIN SUBSCRIPT SMALL LETTER I}: 3}')) assert latex == r'$$\left \{ n_{i} : 3, \quad \pi : 3.14\right \}$$' app.run_cell("inst.display_formatter.formatters['text/latex'].enabled = True") app.run_cell("init_printing(use_latex=True, print_builtin=False)") app.run_cell("a = format({Symbol('pi'): 3.14, Symbol('n_i'): 3})") # Deal with API change starting at IPython 1.0 if int(ipython.__version__.split(".")[0]) < 1: text = app.user_ns['a']['text/plain'] raises(KeyError, lambda: app.user_ns['a']['text/latex']) else: text = app.user_ns['a'][0]['text/plain'] raises(KeyError, lambda: app.user_ns['a'][0]['text/latex']) # Note : In Python 3 the text is unicode, but in 2 it is a string. # Python 3.3.3 + IPython 0.13.2 gives: '{n_i: 3, pi: 3.14}' # Python 3.3.3 + IPython 1.1.0 gives: '{n_i: 3, pi: 3.14}' # Python 2.7.5 + IPython 1.1.0 gives: '{pi: 3.14, n_i: 3}' assert text in ("{pi: 3.14, n_i: 3}", "{n_i: 3, pi: 3.14}") def test_matplotlib_bad_latex(): # Initialize and setup IPython session app = init_ipython_session() app.run_cell("import IPython") app.run_cell("ip = get_ipython()") app.run_cell("inst = ip.instance()") app.run_cell("format = inst.display_formatter.format") app.run_cell("from sympy import init_printing, Matrix") app.run_cell("init_printing(use_latex='matplotlib')") # The png formatter is not enabled by default in this context app.run_cell("inst.display_formatter.formatters['image/png'].enabled = True") # Make sure no warnings are raised by IPython app.run_cell("import warnings") app.run_cell("warnings.simplefilter('error', IPython.core.formatters.FormatterWarning)") # This should not raise an exception app.run_cell("a = format(Matrix([1, 2, 3]))") # issue 9799 app.run_cell("from sympy import Piecewise, Symbol, Eq") app.run_cell("x = Symbol('x'); pw = format(Piecewise((1, Eq(x, 0)), (0, True)))")
bsd-3-clause
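For context, this is the interactive behaviour the tests above poke at through the display formatter: init_printing() registers SymPy's pretty and LaTeX printers. The snippet below is a plain-Python approximation that calls pretty() and latex() directly rather than going through IPython.

from sympy import Symbol, init_printing, pretty, latex

init_printing(use_latex=True)   # in IPython this enables text/plain and text/latex output

pi_sym = Symbol('pi')
print(pretty(pi_sym**2))   # the pretty text form checked by the tests (unicode if available)
print(latex(pi_sym**2))    # the LaTeX form, '\pi^{2}'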
sjperkins/tensorflow
tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
153
6723
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """sklearn cross-support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import numpy as np import six def _pprint(d): return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()]) class _BaseEstimator(object): """This is a cross-import when sklearn is not available. Adopted from sklearn.BaseEstimator implementation. https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py """ def get_params(self, deep=True): """Get parameters for this estimator. Args: deep: boolean, optional If `True`, will return the parameters for this estimator and contained subobjects that are estimators. Returns: params : mapping of string to any Parameter names mapped to their values. """ out = dict() param_names = [name for name in self.__dict__ if not name.startswith('_')] for key in param_names: value = getattr(self, key, None) if isinstance(value, collections.Callable): continue # XXX: should we rather test if instance of estimator? if deep and hasattr(value, 'get_params'): deep_items = value.get_params().items() out.update((key + '__' + k, val) for k, val in deep_items) out[key] = value return out def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as pipelines). The former have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Args: **params: Parameters. Returns: self Raises: ValueError: If params contain invalid names. """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in six.iteritems(params): split = key.split('__', 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' % (name, self)) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.' 
% (key, self.__class__.__name__)) setattr(self, key, value) return self def __repr__(self): class_name = self.__class__.__name__ return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False)),) # pylint: disable=old-style-class class _ClassifierMixin(): """Mixin class for all classifiers.""" pass class _RegressorMixin(): """Mixin class for all regression estimators.""" pass class _TransformerMixin(): """Mixin class for all transformer estimators.""" class NotFittedError(ValueError, AttributeError): """Exception class to raise if estimator is used before fitting. This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. Examples: >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import NotFittedError >>> try: ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) ... except NotFittedError as e: ... print(repr(e)) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS NotFittedError('This LinearSVC instance is not fitted yet',) Copied from https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py """ # pylint: enable=old-style-class def _accuracy_score(y_true, y_pred): score = y_true == y_pred return np.average(score) def _mean_squared_error(y_true, y_pred): if len(y_true.shape) > 1: y_true = np.squeeze(y_true) if len(y_pred.shape) > 1: y_pred = np.squeeze(y_pred) return np.average((y_true - y_pred)**2) def _train_test_split(*args, **options): # pylint: disable=missing-docstring test_size = options.pop('test_size', None) train_size = options.pop('train_size', None) random_state = options.pop('random_state', None) if test_size is None and train_size is None: train_size = 0.75 elif train_size is None: train_size = 1 - test_size train_size = int(train_size * args[0].shape[0]) np.random.seed(random_state) indices = np.random.permutation(args[0].shape[0]) train_idx, test_idx = indices[:train_size], indices[train_size:] result = [] for x in args: result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)] return tuple(result) # If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn. TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False) if TRY_IMPORT_SKLEARN: # pylint: disable=g-import-not-at-top,g-multiple-import,unused-import from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin from sklearn.metrics import accuracy_score, log_loss, mean_squared_error from sklearn.cross_validation import train_test_split try: from sklearn.exceptions import NotFittedError except ImportError: try: from sklearn.utils.validation import NotFittedError except ImportError: pass else: # Naive implementations of sklearn classes and functions. BaseEstimator = _BaseEstimator ClassifierMixin = _ClassifierMixin RegressorMixin = _RegressorMixin TransformerMixin = _TransformerMixin accuracy_score = _accuracy_score log_loss = None mean_squared_error = _mean_squared_error train_test_split = _train_test_split
apache-2.0
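A behaviour sketch of the naive train/test split fallback defined above: with the default train_size of 0.75, a 100-row input splits into 75 training and 25 test rows. The helper below re-states the same logic locally rather than importing the contrib module.

import numpy as np


def naive_train_test_split(*args, train_size=0.75, random_state=None):
    # Same semantics as the fallback above: one shared permutation, then a
    # head/tail split applied to every array passed in.
    np.random.seed(random_state)
    n_train = int(train_size * args[0].shape[0])
    indices = np.random.permutation(args[0].shape[0])
    train_idx, test_idx = indices[:n_train], indices[n_train:]
    result = []
    for x in args:
        result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
    return tuple(result)


X = np.arange(200).reshape(100, 2)
y = np.arange(100)
X_train, X_test, y_train, y_test = naive_train_test_split(X, y, random_state=42)
assert X_train.shape == (75, 2) and y_test.shape == (25,)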
Neha-Setia/Pearson-Api
welcome.py
1
5817
from flask import Flask, render_template, request, jsonify, url_for, send_from_directory import json,os,pymongo,ssl from watson_developer_cloud import DiscoveryV1 from werkzeug.utils import secure_filename import base64, pandas as pd from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import TfidfVectorizer from werkzeug.datastructures import FileStorage MONGODB_URL = "mongodb://admin:[email protected]:25934,bluemix-sandbox-dal-9-portal.6.dblayer.com:25934/admin?ssl=true" # MONGODB_URL = os.environ.get('MONGODB_URL') client = pymongo.MongoClient(MONGODB_URL, ssl_cert_reqs=ssl.CERT_NONE) print(client) db = client.discovery_db coll = db.Pearsons_maths_dictionary UPLOAD_FOLDER = './uploads' app = Flask(__name__) app._static_folder = './static' app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER discovery = DiscoveryV1( '2016-11-07', username='5f10f086-cd37-4386-bffe-f4e829e589a8', password='crMQG1F3jQuR') environments = discovery.get_environments() print(environments) my_environment = [x for x in environments['environments'] if x['name'] == 'byod'] environment_id = my_environment[0]['environment_id'] print(environment_id) ##################### COLLECTION ID FOR EDU DCOS ################## collections = discovery.list_collections(environment_id) doc_collection = [x for x in collections['collections'] if x['name'] == 'pearson-api'] doc_collection_id = doc_collection[0]['collection_id'] print(doc_collection_id) #4ee23389-04fa-4ecf-9811-957bcecb19b4 configuration_id = discovery.get_default_configuration_id(environment_id=environment_id) print(configuration_id) # api_endpoint = "https://gateway.watsonplatform.net/discovery/api/v1" username = '5f10f086-cd37-4386-bffe-f4e829e589a8' password = 'crMQG1F3jQuR' document_id_list =[] def decode_base64(data): """Decode base64, padding being optional. :param data: Base64 data as an ASCII byte string :returns: The decoded byte string. 
""" missing_padding = len(data) % 4 if missing_padding != 0: data += b'=' * (4 - missing_padding) return base64.b64decode(data) @app.route("/") def main(): return render_template('document-upload.html') @app.route("/upload", methods=['POST']) def upload_document(): print("upload") print(request) print(request.data) file = request.data file = file.split(',') file = decode_base64(file[1]) print(file) file1 = FileStorage(stream=file, name="division.pdf") print(file1.stream) # f = file1.filename # print(f) # # file2 = stream.read() # # print(file2) # # file_result = open('./uploads/filename1.json', 'wb') # create a writable file and write the decoding result # # file_result.write(file) # # # with open(os.path.join(app.config['UPLOAD_FOLDER'], 'filename1.json')) as fileinfo: # # print(fileinfo) # files=[] # files.append((f, FileStorage(stream=file, filename="division.pdf"))) # print(files[0]) add_doc = discovery.add_document(environment_id, doc_collection_id, file_info=file1) d = json.dumps(add_doc, indent=2) print(d) document_id_list.append(add_doc['document_id'][0]) print(document_id_list[0]) print("hi") qopts = {'query': ''} my_query = discovery.query(environment_id, doc_collection_id, qopts) print(my_query) # data = [q['enriched_text'] for q in my_query['results']] data = my_query['results'][0]['enriched_text'] print(data) data = [q['text'] for q in data['keywords']] print(data) data = ' '.join([str(item) for item in data]) print(data) cursor_cl = coll.find({"class_name": "Class 3", "subject_name": "Mathematics"}) print(cursor_cl) # # ----> comb = [] myList = [] print(cursor_cl.count()) conc_weightage = cursor_cl.count() if conc_weightage > 0: for selected_content_db in cursor_cl: match1 = [] part_kw1 = selected_content_db['keywords'].split(",") instruction_id = selected_content_db['sctid'] filt_semi_kw1 = [let.encode('utf-8') for let in part_kw1] contend = ' '.join(filt_semi_kw1) print(contend) train_set = [contend, data] print(train_set) tfidf_vectorizer = TfidfVectorizer() tfidf_matrix_train = tfidf_vectorizer.fit_transform(train_set) cosine_score = cosine_similarity(tfidf_matrix_train) cosine_score = cosine_score.tolist() print(cosine_score) cosine_score = cosine_score[0] cosine_score = cosine_score[1] parser_iddetails = instruction_id parser_score = cosine_score myList.append(cosine_score) comb.append([instruction_id, cosine_score]) print(instruction_id) print(cosine_score) df = pd.DataFrame(comb) print(df) render_template('result.html', tables=[df.to_html(classes='table', index=False)]) return 'file upload successful' @app.route("/getmetaData", methods=['GET']) def get_document(): qopts = {'query': ''} my_query = discovery.query(environment_id, doc_collection_id, qopts) json_it = jsonify(my_query) print(type(json_it)) # return render_template("Json-Result.html", data=json_it) return json_it @app.route("/delete_document", methods=['GET']) def delete_document(): for document_id in document_id_list: delete_doc = discovery.delete_document(environment_id, doc_collection_id, document_id) print(json.dumps(delete_doc, indent=2)) return json.dumps(delete_doc, indent=2) port = os.getenv('PORT', '5000') if __name__ == "__main__": app.run(host='0.0.0.0', port=int(port))
apache-2.0
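The matching step at the heart of the upload handler above, isolated: two keyword strings are TF-IDF-vectorised together and compared with cosine similarity. The keyword strings here are made up for illustration.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

content_keywords = "division quotient remainder long division"
document_keywords = "quotient division multiplication tables"

tfidf_matrix = TfidfVectorizer().fit_transform([content_keywords, document_keywords])
score = cosine_similarity(tfidf_matrix)[0, 1]   # off-diagonal entry: similarity of the two texts
print(round(score, 3))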
yuyuz/FLASH
HPOlib/Plotting/plotTrace_perExp.py
5
6055
#!/usr/bin/env python ## # wrapping: A program making it easy to use hyperparameter # optimization software. # Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from argparse import ArgumentParser import cPickle import itertools import sys from matplotlib.pyplot import tight_layout, figure, subplots_adjust, subplot, savefig, show import matplotlib.gridspec import numpy as np from HPOlib.Plotting import plot_util __authors__ = ["Katharina Eggensperger", "Matthias Feurer"] __contact__ = "automl.org" def plot_optimization_trace_cv(trial_list, name_list, optimum=0, title="", log=True, save="", y_max=0, y_min=0): markers =plot_util.get_plot_markers() colors = plot_util.get_plot_colors() linestyles = itertools.cycle(['-']) size = 1 ratio = 5 gs = matplotlib.gridspec.GridSpec(ratio, 1) fig = figure(1, dpi=100) fig.suptitle(title, fontsize=16) ax1 = subplot(gs[0:ratio, :]) ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) min_val = sys.maxint max_val = -sys.maxint max_trials = 0 fig.suptitle(title, fontsize=16) # Plot the average error and std for i in range(len(name_list)): m = markers.next() c = colors.next() l = linestyles.next() leg = False for tr in trial_list[i]: if log: tr = np.log10(tr) x = range(1, len(tr)+1) y = tr if not leg: ax1.plot(x, y, color=c, linewidth=size, linestyle=l, label=name_list[i][0]) leg = True ax1.plot(x, y, color=c, linewidth=size, linestyle=l) min_val = min(min_val, min(tr)) max_val = max(max_val, max(tr)) max_trials = max(max_trials, len(tr)) # Maybe plot on logscale ylabel = "" if log: ax1.set_ylabel("log10(Minfunction value)" + ylabel) else: ax1.set_ylabel("Minfunction value" + ylabel) # Descript and label the stuff leg = ax1.legend(loc='best', fancybox=True) leg.get_frame().set_alpha(0.5) ax1.set_xlabel("#Function evaluations") if y_max == y_min: # Set axes limits ax1.set_ylim([min_val-0.1*abs((max_val-min_val)), max_val+0.1*abs((max_val-min_val))]) else: ax1.set_ylim([y_min, y_max]) ax1.set_xlim([0, max_trials + 1]) tight_layout() subplots_adjust(top=0.85) if save != "": savefig(save, dpi=100, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches="tight", pad_inches=0.1) else: show() def main(pkl_list, name_list, autofill, optimum=0, save="", title="", log=False, y_min=0, y_max=0): trial_list = list() for i in range(len(pkl_list)): tmp_trial_list = list() max_len = -sys.maxint for pkl in pkl_list[i]: fh = open(pkl, "r") trials = cPickle.load(fh) fh.close() trace = plot_util.get_Trace_cv(trials) tmp_trial_list.append(trace) max_len = max(max_len, len(trace)) trial_list.append(list()) for tr in tmp_trial_list: # if len(tr) < max_len: # tr.extend([tr[-1] for idx in range(abs(max_len - len(tr)))]) trial_list[-1].append(np.array(tr)) plot_optimization_trace_cv(trial_list, name_list, optimum, title=title, log=log, save=save, y_min=y_min, y_max=y_max) 
if save != "": sys.stdout.write("Saved plot to " + save + "\n") else: sys.stdout.write("..Done\n") if __name__ == "__main__": prog = "python plotTraceWithStd.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]" description = "Plot a Trace with std for multiple experiments" parser = ArgumentParser(description=description, prog=prog) # Options for specific benchmarks parser.add_argument("-o", "--optimum", type=float, dest="optimum", default=0, help="If not set, the optimum is supposed to be zero") # Options which are available only for this plot parser.add_argument("-a", "--autofill", action="store_true", dest="autofill", default=False, help="Fill trace automatically") # General Options parser.add_argument("-l", "--log", action="store_true", dest="log", default=False, help="Plot on log scale") parser.add_argument("--max", dest="max", type=float, default=0, help="Maximum of the plot") parser.add_argument("--min", dest="min", type=float, default=0, help="Minimum of the plot") parser.add_argument("-s", "--save", dest="save", default="", help="Where to save plot instead of showing it?") parser.add_argument("-t", "--title", dest="title", default="", help="Optional supertitle for plot") args, unknown = parser.parse_known_args() sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n") pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown) main(pkl_list=pkl_list_main, name_list=name_list_main, autofill=args.autofill, optimum=args.optimum, save=args.save, title=args.title, log=args.log, y_min=args.min, y_max=args.max)
gpl-3.0
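A stripped-down version of the trace plot produced above: one best-so-far curve per optimizer, function evaluations on the x axis and (optionally log10-scaled) objective values on the y axis. The traces and optimizer names below are random and purely illustrative.

import numpy as np
import matplotlib.pyplot as plt

traces = {
    'optimizer A': np.minimum.accumulate(np.random.rand(50) + 0.1),
    'optimizer B': np.minimum.accumulate(np.random.rand(50) + 0.2),
}

fig, ax = plt.subplots()
for name, trace in traces.items():
    ax.plot(range(1, len(trace) + 1), np.log10(trace), linewidth=1, label=name)
ax.set_xlabel("#Function evaluations")
ax.set_ylabel("log10(Minfunction value)")
ax.legend(loc='best', fancybox=True)
plt.show()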
sam81/pychoacoustics
pychoacoustics/win_UML_est_guess_parspace_plot.py
1
21278
# -*- coding: utf-8 -*- # Copyright (C) 2008-2020 Samuele Carcagno <[email protected]> # This file is part of pychoacoustics # pychoacoustics is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # pychoacoustics is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with pychoacoustics. If not, see <http://www.gnu.org/licenses/>. from __future__ import nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals import matplotlib from cycler import cycler from .pyqtver import* if pyqtversion == 4: from PyQt4 import QtGui, QtCore from PyQt4.QtGui import QCheckBox, QIcon, QHBoxLayout, QMainWindow, QPushButton, QVBoxLayout, QWidget # import the Qt4Agg FigureCanvas object, that binds Figure to # Qt4Agg backend. It also inherits from QWidget from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas # import the NavigationToolbar Qt4Agg widget from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar matplotlib.rcParams['backend'] = "Qt4Agg" matplotlib.rcParams['backend.qt4'] = "PyQt4" elif pyqtversion == -4: from PySide import QtGui, QtCore from PySide.QtGui import QCheckBox, QIcon, QHBoxLayout, QMainWindow, QPushButton, QVBoxLayout, QWidget # import the Qt4Agg FigureCanvas object, that binds Figure to # Qt4Agg backend. It also inherits from QWidget from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas # import the NavigationToolbar Qt4Agg widget from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar matplotlib.rcParams['backend'] = "Qt4Agg" matplotlib.rcParams['backend.qt4'] = "PySide" elif pyqtversion == 5: from PyQt5 import QtGui, QtCore from PyQt5.QtWidgets import QCheckBox, QHBoxLayout, QMainWindow, QPushButton, QVBoxLayout, QWidget from PyQt5.QtGui import QIcon # import the Qt4Agg FigureCanvas object, that binds Figure to # Qt4Agg backend. 
It also inherits from QWidget from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas # import the NavigationToolbar Qt4Agg widget from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar matplotlib.rcParams['backend'] = "Qt5Agg" # Matplotlib Figure object from matplotlib.figure import Figure from matplotlib.widgets import Cursor import numpy as np import copy, os from numpy import arange, ceil, floor, linspace, log10 from matplotlib.lines import Line2D import matplotlib.pyplot as plt import matplotlib as mpl #import pandas as pd import matplotlib.font_manager as fm from .pysdt import* from .UML_method_est_guess import* from .utils_general import* #mpl.rcParams['font.family'] = 'sans-serif' #fontPath = os.path.abspath(os.path.dirname(__file__)+'/../') + '/data/Ubuntu-R.ttf' #fontPath = '/media/ntfsShared/lin_home/auditory/code/pychoacoustics/pychoacoustics-qt4/development/dev/data/Ubuntu-R.ttf' #prop = fm.FontProperties(fname=fontPath) #mpl.rcParams.update({'font.size': 16}) class UMLEstGuessRateParSpacePlot(QMainWindow): def __init__(self, parent): QMainWindow.__init__(self, parent) self.setAttribute(QtCore.Qt.WA_DeleteOnClose) #self.prm = prm self.pchs = ["o", "s", "v", "p", "h", "8", "*", "x", "+", "d", ",", "^", "<", ">", "1", "2", "3", "4", "H", "D", ".", "|", "_"] mpl.rcParams['xtick.major.size'] = 6 mpl.rcParams['xtick.minor.size'] = 4 mpl.rcParams['xtick.major.width'] = 1 mpl.rcParams['xtick.minor.width'] = 1 mpl.rcParams['ytick.major.size'] = 9 mpl.rcParams['ytick.minor.size'] = 5 mpl.rcParams['ytick.major.width'] = 0.8 mpl.rcParams['ytick.minor.width'] = 0.8 mpl.rcParams['xtick.direction'] = 'out' mpl.rcParams['ytick.direction'] = 'out' mpl.rcParams['font.size'] = 14 mpl.rcParams['figure.facecolor'] = 'white' mpl.rcParams['lines.color'] = 'black' mpl.rcParams['axes.prop_cycle'] = cycler('color', ["#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7"]) self.mw = QWidget(self) self.vbl = QVBoxLayout(self.mw) self.fig = Figure(figsize=(8,8))#facecolor=self.canvasColor, dpi=self.dpi) self.ax1 = self.fig.add_subplot(221) self.ax2 = self.fig.add_subplot(222) self.ax3 = self.fig.add_subplot(223) self.ax4 = self.fig.add_subplot(224) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.mw) self.ntb = NavigationToolbar(self.canvas, self.mw) self.logAxisMidpoint = QCheckBox(self.tr("Midpoint Log Axis")) self.logAxisMidpoint.stateChanged[int].connect(self.toggleMidpointLogAxis) self.logAxisSlope = QCheckBox(self.tr("Slope Log Axis")) self.logAxisSlope.stateChanged[int].connect(self.toggleSlopeLogAxis) self.logAxisGuess = QCheckBox(self.tr("Guess Log Axis")) self.logAxisGuess.stateChanged[int].connect(self.toggleGuessLogAxis) self.logAxisLapse = QCheckBox(self.tr("Lapse Log Axis")) self.logAxisLapse.stateChanged[int].connect(self.toggleLapseLogAxis) self.updateButton = QPushButton(self.tr("Update"), self) self.updateButton.setIcon(QIcon.fromTheme("view-refresh", QIcon(":/view-refresh"))) self.updateButton.clicked.connect(self.onClickUpdateButton) self.ntbBox = QHBoxLayout() self.ntbBox.addWidget(self.ntb) self.ntbBox.addWidget(self.logAxisMidpoint) self.ntbBox.addWidget(self.logAxisSlope) self.ntbBox.addWidget(self.logAxisGuess) self.ntbBox.addWidget(self.logAxisLapse) self.ntbBox.addWidget(self.updateButton) self.vbl.addWidget(self.canvas) self.vbl.addLayout(self.ntbBox) self.mw.setFocus() self.setCentralWidget(self.mw) self.getUMLPars() if self.stimScaling == "Linear": 
self.logAxisMidpoint.setChecked(False) self.plotDataMidpoint() elif self.stimScaling == "Logarithmic": self.logAxisMidpoint.setChecked(True) self.plotDataMidpointLogAxis() if self.slopeSpacing == "Linear": self.logAxisSlope.setChecked(False) self.plotDataSlope() elif self.slopeSpacing == "Logarithmic": self.logAxisSlope.setChecked(True) self.plotDataSlopeLogAxis() if self.guessSpacing == "Linear": self.logAxisGuess.setChecked(False) self.plotDataGuess() elif self.guessSpacing == "Logarithmic": self.logAxisGuess.setChecked(True) self.plotDataGuessLogAxis() if self.lapseSpacing == "Linear": self.logAxisLapse.setChecked(False) self.plotDataLapse() elif self.lapseSpacing == "Logarithmic": self.logAxisLapse.setChecked(True) self.plotDataLapseLogAxis() self.fig.suptitle(self.tr("UML Parameter Space")) self.show() self.canvas.draw() def getUMLPars(self): self.psyFun = self.parent().psyFunChooser.currentText() self.loStim = self.parent().currLocale.toDouble(self.parent().loStim.text())[0] self.hiStim = self.parent().currLocale.toDouble(self.parent().hiStim.text())[0] self.stimScaling = self.parent().stimScalingChooser.currentText() self.loMidPoint = self.parent().currLocale.toDouble(self.parent().loMidPoint.text())[0] self.hiMidPoint = self.parent().currLocale.toDouble(self.parent().hiMidPoint.text())[0] self.threshGridStep = self.parent().currLocale.toDouble(self.parent().threshGridStep.text())[0] self.threshPrior = self.parent().threshPriorChooser.currentText() self.threshPriorMu = self.parent().currLocale.toDouble(self.parent().threshPriorMu.text())[0] self.threshPriorSTD = self.parent().currLocale.toDouble(self.parent().threshPriorSTD.text())[0] self.loSlope = self.parent().currLocale.toDouble(self.parent().loSlope.text())[0] self.hiSlope = self.parent().currLocale.toDouble(self.parent().hiSlope.text())[0] self.slopeGridStep = self.parent().currLocale.toDouble(self.parent().slopeGridStep.text())[0] self.slopeSpacing = self.parent().slopeSpacingChooser.currentText() self.slopePrior = self.parent().slopePriorChooser.currentText() self.slopePriorMu = self.parent().currLocale.toDouble(self.parent().slopePriorMu.text())[0] self.slopePriorSTD = self.parent().currLocale.toDouble(self.parent().slopePriorSTD.text())[0] self.loGuess = self.parent().currLocale.toDouble(self.parent().loGuess.text())[0] self.hiGuess = self.parent().currLocale.toDouble(self.parent().hiGuess.text())[0] self.guessGridStep = self.parent().currLocale.toDouble(self.parent().guessGridStep.text())[0] self.guessSpacing = self.parent().guessSpacingChooser.currentText() self.guessPrior = self.parent().guessPriorChooser.currentText() self.guessPriorMu = self.parent().currLocale.toDouble(self.parent().guessPriorMu.text())[0] self.guessPriorSTD = self.parent().currLocale.toDouble(self.parent().guessPriorSTD.text())[0] self.loLapse = self.parent().currLocale.toDouble(self.parent().loLapse.text())[0] self.hiLapse = self.parent().currLocale.toDouble(self.parent().hiLapse.text())[0] self.lapseGridStep = self.parent().currLocale.toDouble(self.parent().lapseGridStep.text())[0] self.lapseSpacing = self.parent().lapseSpacingChooser.currentText() self.lapsePrior = self.parent().lapsePriorChooser.currentText() self.lapsePriorMu = self.parent().currLocale.toDouble(self.parent().lapsePriorMu.text())[0] self.lapsePriorSTD = self.parent().currLocale.toDouble(self.parent().lapsePriorSTD.text())[0] # try: # self.nAlternatives = int(self.parent().nAlternativesChooser.currentText()) # except: # self.nAlternatives = 2 if self.stimScaling == "Linear": 
self.UML = setupUMLEstGuessRate(model=self.psyFun, swptRule="Up-Down", nDown=2, centTend = "Mean", stimScale = self.stimScaling, x0=1, xLim=(self.loStim, self.hiStim), alphaLim=(self.loMidPoint, self.hiMidPoint), alphaStep=self.threshGridStep, alphaSpacing="Linear", alphaDist=self.threshPrior, alphaMu=self.threshPriorMu, alphaSTD=self.threshPriorSTD, betaLim=(self.loSlope, self.hiSlope), betaStep=self.slopeGridStep, betaSpacing=self.slopeSpacing, betaDist=self.slopePrior, betaMu=self.slopePriorMu, betaSTD=self.slopePriorSTD, gammaLim=(self.loGuess, self.hiGuess), gammaStep=self.guessGridStep, gammaSpacing=self.guessSpacing, gammaDist=self.guessPrior, gammaMu=self.guessPriorMu, gammaSTD=self.guessPriorSTD, lambdaLim=(self.loLapse, self.hiLapse), lambdaStep=self.lapseGridStep, lambdaSpacing=self.lapseSpacing, lambdaDist=self.lapsePrior, lambdaMu=self.lapsePriorMu, lambdaSTD=self.lapsePriorSTD) elif self.stimScaling == "Logarithmic": self.UML = setupUMLEstGuessRate(model=self.psyFun, swptRule="Up-Down", nDown=2, centTend = "Mean", stimScale = self.stimScaling, x0=1, xLim=(abs(self.loStim), abs(self.hiStim)), alphaLim=(abs(self.loMidPoint), abs(self.hiMidPoint)), alphaStep=self.threshGridStep, alphaSpacing="Linear", alphaDist=self.threshPrior, alphaMu=abs(self.threshPriorMu), alphaSTD=self.threshPriorSTD, betaLim=(self.loSlope, self.hiSlope), betaStep=self.slopeGridStep, betaSpacing=self.slopeSpacing, betaDist=self.slopePrior, betaMu=self.slopePriorMu, betaSTD=self.slopePriorSTD, gammaLim=(self.loGuess, self.hiGuess), gammaStep=self.guessGridStep, gammaSpacing=self.guessSpacing, gammaDist=self.guessPrior, gammaMu=self.guessPriorMu, gammaSTD=self.guessPriorSTD, lambdaLim=(self.loLapse, self.hiLapse), lambdaStep=self.lapseGridStep, lambdaSpacing=self.lapseSpacing, lambdaDist=self.lapsePrior, lambdaMu=self.lapsePriorMu, lambdaSTD=self.lapsePriorSTD) def plotDataMidpoint(self): self.ax1.clear() self.A = setPrior(self.UML["a"], self.UML["par"]["alpha"]) if self.stimScaling == "Linear": markerline, stemlines, baseline = self.ax1.stem(self.UML["alpha"], self.A[:,0,0,0], 'k') elif self.stimScaling == "Logarithmic": markerline, stemlines, baseline = self.ax1.stem(exp(self.UML["alpha"]), self.A[:,0,0,0]/exp(self.UML["alpha"]), 'k') if self.loStim < 0: self.ax1.set_xticklabels(list(map(str, -self.ax1.get_xticks()))) plt.setp(markerline, 'markerfacecolor', 'k') nAlpha = len(self.A[:,0,0]) self.ax1.set_title("Midpoint, #Points " + str(nAlpha)) def plotDataSlope(self): self.ax2.clear() self.B = setPrior(self.UML["b"], self.UML["par"]["beta"]) if self.UML["par"]["beta"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax2.stem(self.UML["beta"], self.B[0,:,0,0], 'k') elif self.UML["par"]["beta"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax2.stem(self.UML["beta"], self.B[0,:,0,0]/self.UML["beta"], 'k') plt.setp(markerline, 'markerfacecolor', 'k') nBeta = len(self.B[0,:,0,0]) self.ax2.set_title("Slope, #Points " + str(nBeta)) def plotDataGuess(self): self.ax4.clear() self.G = setPrior(self.UML["g"], self.UML["par"]["gamma"]) if self.UML["par"]["gamma"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax4.stem(self.UML["gamma"], self.G[0,0,:,0], 'k') elif self.UML["par"]["gamma"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax4.stem(self.UML["gamma"], self.G[0,0,:,0]/self.UML["gamma"], 'k') plt.setp(markerline, 'markerfacecolor', 'k') nGamma = len(self.G[0,0,:,0]) self.ax4.set_title("Guess, #Points " + str(nGamma)) def 
plotDataLapse(self): self.ax3.clear() L = setPrior(self.UML["l"], self.UML["par"]["lambda"]) if self.UML["par"]["lambda"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax3.stem(self.UML["lambda"], L[0,0,0,:], 'k') elif self.UML["par"]["lambda"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax3.stem(self.UML["lambda"], L[0,0,0,:]/self.UML["lambda"], 'k') plt.setp(markerline, 'markerfacecolor', 'k') nLambda = len(L[0,0,0,:]) self.ax3.set_title("Lapse, #Points " + str(nLambda)) def plotDataMidpointLogAxis(self): self.ax1.clear() self.A = setPrior(self.UML["a"], self.UML["par"]["alpha"]) if self.stimScaling == "Logarithmic": x = self.UML["alpha"] markerline, stemlines, baseline = self.ax1.stem(x, self.A[:,0,0,0], 'k') elif self.stimScaling == "Linear": x = log(self.UML["alpha"]) markerline, stemlines, baseline = self.ax1.stem(x, self.A[:,0,0,0]*self.UML["alpha"], 'k') setLogTicks(self.ax1, exp(1)) plt.setp(markerline, 'markerfacecolor', 'k') nAlpha = len(self.A[:,0,0,0]) self.ax1.set_title("Midpoint, #Points " + str(nAlpha)) def plotDataSlopeLogAxis(self): self.ax2.clear() self.B = setPrior(self.UML["b"], self.UML["par"]["beta"]) if self.UML["par"]["beta"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax2.stem(log(self.UML["beta"]), self.B[0,:,0,0], 'k') elif self.UML["par"]["beta"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax2.stem(log(self.UML["beta"]), self.B[0,:,0,0]*self.UML["beta"], 'k') setLogTicks(self.ax2, exp(1)) plt.setp(markerline, 'markerfacecolor', 'k') nBeta = len(self.B[0,:,0,0]) self.ax2.set_title("Slope, #Points " + str(nBeta)) def plotDataGuessLogAxis(self): self.ax4.clear() self.G = setPrior(self.UML["g"], self.UML["par"]["gamma"]) if self.UML["par"]["gamma"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax4.stem(log(self.UML["gamma"]), self.G[0,0,:,0], 'k') elif self.UML["par"]["gamma"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax4.stem(log(self.UML["gamma"]), self.G[0,0,:,0]*self.UML["gamma"], 'k') setLogTicks(self.ax4, exp(1)) plt.setp(markerline, 'markerfacecolor', 'k') nGamma = len(self.G[0,0,:,0]) self.ax4.set_title("Guess, #Points " + str(nGamma)) def plotDataLapseLogAxis(self): self.ax3.clear() L = setPrior(self.UML["l"], self.UML["par"]["lambda"]) if self.UML["par"]["lambda"]["spacing"] == "Logarithmic": markerline, stemlines, baseline = self.ax3.stem(log(self.UML["lambda"]), L[0,0,0,:], 'k') elif self.UML["par"]["lambda"]["spacing"] == "Linear": markerline, stemlines, baseline = self.ax3.stem(log(self.UML["lambda"]), L[0,0,0,:]*self.UML["lambda"], 'k') setLogTicks(self.ax3, exp(1)) plt.setp(markerline, 'markerfacecolor', 'k') nLambda = len(L[0,0,0,:]) self.ax3.set_title("Lapse, #Points " + str(nLambda)) def onClickUpdateButton(self): self.getUMLPars() if self.logAxisMidpoint.isChecked() == False: self.plotDataMidpoint() else: self.plotDataMidpointLogAxis() if self.logAxisSlope.isChecked() == False: self.plotDataSlope() else: self.plotDataSlopeLogAxis() if self.logAxisGuess.isChecked() == False: self.plotDataGuess() else: self.plotDataGuessLogAxis() if self.logAxisLapse.isChecked() == False: self.plotDataLapse() else: self.plotDataLapseLogAxis() self.canvas.draw() def toggleMidpointLogAxis(self): if self.logAxisMidpoint.isChecked() == True: self.plotDataMidpointLogAxis() else: self.plotDataMidpoint() self.canvas.draw() def toggleSlopeLogAxis(self): if self.logAxisSlope.isChecked() == True: self.plotDataSlopeLogAxis() else: 
self.plotDataSlope() self.canvas.draw() def toggleGuessLogAxis(self): if self.logAxisGuess.isChecked() == True: self.plotDataGuessLogAxis() else: self.plotDataGuess() self.canvas.draw() def toggleLapseLogAxis(self): if self.logAxisLapse.isChecked() == True: self.plotDataLapseLogAxis() else: self.plotDataLapse() self.canvas.draw()
gpl-3.0
benjaminoh1/tensorflowcookbook
Chapter 03/lin_reg_decomposition.py
1
1611
# Linear Regression: Decomposition Method
#----------------------------------
#
# This script shows how to use TensorFlow to
# solve linear regression via a Cholesky decomposition
# of the normal equations.
#
# Given Ax=b, form the normal equations t(A)*A*x = t(A)*b.
# With a Cholesky decomposition t(A)*A = L*L' we can solve for x via
# 1) L*y = t(A)*b
# 2) L'*x = y

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
ops.reset_default_graph()

# Create graph
sess = tf.Session()

# Create the data
x_vals = np.linspace(0, 10, 100)
y_vals = x_vals + np.random.normal(0, 1, 100)

# Create design matrix
x_vals_column = np.transpose(np.matrix(x_vals))
ones_column = np.transpose(np.matrix(np.repeat(1, 100)))
A = np.column_stack((x_vals_column, ones_column))

# Create b matrix
b = np.transpose(np.matrix(y_vals))

# Create tensors
A_tensor = tf.constant(A)
b_tensor = tf.constant(b)

# Find Cholesky decomposition of t(A)*A
tA_A = tf.matmul(tf.transpose(A_tensor), A_tensor)
L = tf.cholesky(tA_A)

# Solve L*y = t(A)*b
tA_b = tf.matmul(tf.transpose(A_tensor), b)
sol1 = tf.matrix_solve(L, tA_b)

# Solve L'*x = y
sol2 = tf.matrix_solve(tf.transpose(L), sol1)

solution_eval = sess.run(sol2)

# Extract coefficients
slope = solution_eval[0][0]
y_intercept = solution_eval[1][0]
print('slope: ' + str(slope))
print('y_intercept: ' + str(y_intercept))

# Get best fit line
best_fit = []
for i in x_vals:
    best_fit.append(slope * i + y_intercept)

# Plot the results
plt.plot(x_vals, y_vals, 'o', label='Data')
plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
plt.legend(loc='upper left')
plt.show()
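# A hedged cross-check, not part of the original recipe: the same normal-equation
# solve can be reproduced with SciPy's Cholesky helpers to verify the TensorFlow
# result above. It reuses A and b from the script; cho_factor/cho_solve are
# standard scipy.linalg functions, and x_check is a name introduced here.
from scipy.linalg import cho_factor, cho_solve

c_and_lower = cho_factor(np.dot(A.T, A))          # t(A)*A = L*L'
x_check = cho_solve(c_and_lower, np.dot(A.T, b))  # solves t(A)*A*x = t(A)*b
print('SciPy check -- slope: ' + str(x_check[0, 0]) +
      ', y_intercept: ' + str(x_check[1, 0]))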
mit
ZenDevelopmentSystems/scikit-learn
sklearn/linear_model/tests/test_perceptron.py
378
1815
import numpy as np
import scipy.sparse as sp

from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()


class MyPerceptron(object):

    def __init__(self, n_iter=1):
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for t in range(self.n_iter):
            for i in range(n_samples):
                if self.predict(X[i])[0] != y[i]:
                    self.w += y[i] * X[i]
                    self.b += y[i]

    def project(self, X):
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        X = np.atleast_2d(X)
        return np.sign(self.project(X))


def test_perceptron_accuracy():
    for data in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_true(score >= 0.7)


def test_perceptron_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)

    clf2 = Perceptron(n_iter=2, shuffle=False)
    clf2.fit(X, y_bin)

    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())


def test_undefined_methods():
    clf = Perceptron()
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
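# A hedged illustration, not part of the scikit-learn test suite: MyPerceptron
# above implements the classic mistake-driven update (w += y_i * x_i, b += y_i
# on each misclassified sample). The toy data and the helper name below are
# hypothetical; they only show the reference implementation separating two
# point clouds labelled +1 / -1.
def _demo_myperceptron():
    rng = np.random.RandomState(0)
    X_toy = np.vstack([rng.randn(20, 2) + [2, 2],
                       rng.randn(20, 2) - [2, 2]])
    y_toy = np.array([1] * 20 + [-1] * 20)

    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    acc = np.mean(clf.predict(X_toy).ravel() == y_toy)
    print("toy accuracy:", acc)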
bsd-3-clause
nvoron23/scikit-learn
sklearn/decomposition/tests/test_online_lda.py
49
13124
import numpy as np from scipy.linalg import block_diag from scipy.sparse import csr_matrix from scipy.special import psi from sklearn.decomposition import LatentDirichletAllocation from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d, _dirichlet_expectation_2d) from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import if_safe_multiprocessing_with_blas from sklearn.utils.validation import NotFittedError from sklearn.externals.six.moves import xrange def _build_sparse_mtx(): # Create 3 topics and each topic has 3 disticnt words. # (Each word only belongs to a single topic.) n_topics = 3 block = n_topics * np.ones((3, 3)) blocks = [block] * n_topics X = block_diag(*blocks) X = csr_matrix(X) return (n_topics, X) def test_lda_default_prior_params(): # default prior parameter should be `1 / topics` # and verbose params should not affect result n_topics, X = _build_sparse_mtx() prior = 1. / n_topics lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior, topic_word_prior=prior, random_state=0) lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0) topic_distr_1 = lda_1.fit_transform(X) topic_distr_2 = lda_2.fit_transform(X) assert_almost_equal(topic_distr_1, topic_distr_2) def test_lda_fit_batch(): # Test LDA batch learning_offset (`fit` method with 'batch' learning) rng = np.random.RandomState(0) n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1, learning_method='batch', random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_fit_online(): # Test LDA online learning (`fit` method with 'online' learning) rng = np.random.RandomState(0) n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10., evaluate_every=1, learning_method='online', random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_partial_fit(): # Test LDA online learning (`partial_fit` method) # (same as test_lda_batch) rng = np.random.RandomState(0) n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10., total_samples=100, random_state=rng) for i in xrange(3): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_dense_input(): # Test LDA with dense input. 
rng = np.random.RandomState(0) n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch', random_state=rng) lda.fit(X.toarray()) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for component in lda.components_: # Find top 3 words in each LDA component top_idx = set(component.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_transform(): # Test LDA transform. # Transform result cannot be negative rng = np.random.RandomState(0) X = rng.randint(5, size=(20, 10)) n_topics = 3 lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng) X_trans = lda.fit_transform(X) assert_true((X_trans > 0.0).any()) def test_lda_fit_transform(): # Test LDA fit_transform & transform # fit_transform and transform result should be the same for method in ('online', 'batch'): rng = np.random.RandomState(0) X = rng.randint(10, size=(50, 20)) lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng) X_fit = lda.fit_transform(X) X_trans = lda.transform(X) assert_array_almost_equal(X_fit, X_trans, 4) def test_lda_partial_fit_dim_mismatch(): # test `n_features` mismatch in `partial_fit` rng = np.random.RandomState(0) n_topics = rng.randint(3, 6) n_col = rng.randint(6, 10) X_1 = np.random.randint(4, size=(10, n_col)) X_2 = np.random.randint(4, size=(10, n_col + 1)) lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5., total_samples=20, random_state=rng) lda.partial_fit(X_1) assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2) def test_invalid_params(): # test `_check_params` method X = np.ones((5, 10)) invalid_models = ( ('n_topics', LatentDirichletAllocation(n_topics=0)), ('learning_method', LatentDirichletAllocation(learning_method='unknown')), ('total_samples', LatentDirichletAllocation(total_samples=0)), ('learning_offset', LatentDirichletAllocation(learning_offset=-1)), ) for param, model in invalid_models: regex = r"^Invalid %r parameter" % param assert_raises_regexp(ValueError, regex, model.fit, X) def test_lda_negative_input(): # test pass dense matrix with sparse negative input. 
X = -np.ones((5, 10)) lda = LatentDirichletAllocation() regex = r"^Negative values in data passed" assert_raises_regexp(ValueError, regex, lda.fit, X) def test_lda_no_component_error(): # test `transform` and `perplexity` before `fit` rng = np.random.RandomState(0) X = rng.randint(4, size=(20, 10)) lda = LatentDirichletAllocation() regex = r"^no 'components_' attribute" assert_raises_regexp(NotFittedError, regex, lda.transform, X) assert_raises_regexp(NotFittedError, regex, lda.perplexity, X) def test_lda_transform_mismatch(): # test `n_features` mismatch in partial_fit and transform rng = np.random.RandomState(0) X = rng.randint(4, size=(20, 10)) X_2 = rng.randint(4, size=(10, 8)) n_topics = rng.randint(3, 6) lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng) lda.partial_fit(X) assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2) @if_safe_multiprocessing_with_blas def test_lda_multi_jobs(): n_topics, X = _build_sparse_mtx() # Test LDA batch training with multi CPU for method in ('online', 'batch'): rng = np.random.RandomState(0) lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2, learning_method=method, random_state=rng) lda.fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) @if_safe_multiprocessing_with_blas def test_lda_partial_fit_multi_jobs(): # Test LDA online training with multi CPU rng = np.random.RandomState(0) n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2, learning_offset=5., total_samples=30, random_state=rng) for i in range(2): lda.partial_fit(X) correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)] for c in lda.components_: top_idx = set(c.argsort()[-3:][::-1]) assert_true(tuple(sorted(top_idx)) in correct_idx_grps) def test_lda_preplexity_mismatch(): # test dimension mismatch in `perplexity` method rng = np.random.RandomState(0) n_topics = rng.randint(3, 6) n_samples = rng.randint(6, 10) X = np.random.randint(4, size=(n_samples, 10)) lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5., total_samples=20, random_state=rng) lda.fit(X) # invalid samples invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics)) assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples) # invalid topic number invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1)) assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics) def test_lda_perplexity(): # Test LDA perplexity for batch training # perplexity should be lower after each iteration n_topics, X = _build_sparse_mtx() for method in ('online', 'batch'): lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method, total_samples=100, random_state=0) lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method, total_samples=100, random_state=0) distr_1 = lda_1.fit_transform(X) perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False) distr_2 = lda_2.fit_transform(X) perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False) assert_greater_equal(perp_1, perp_2) perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True) perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True) assert_greater_equal(perp_1_subsampling, perp_2_subsampling) def test_lda_score(): # Test LDA score for batch training # score should be higher after each iteration 
n_topics, X = _build_sparse_mtx() for method in ('online', 'batch'): lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method, total_samples=100, random_state=0) lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method, total_samples=100, random_state=0) lda_1.fit_transform(X) score_1 = lda_1.score(X) lda_2.fit_transform(X) score_2 = lda_2.score(X) assert_greater_equal(score_2, score_1) def test_perplexity_input_format(): # Test LDA perplexity for sparse and dense input # score should be the same for both dense and sparse input n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch', total_samples=100, random_state=0) distr = lda.fit_transform(X) perp_1 = lda.perplexity(X) perp_2 = lda.perplexity(X, distr) perp_3 = lda.perplexity(X.toarray(), distr) assert_almost_equal(perp_1, perp_2) assert_almost_equal(perp_1, perp_3) def test_lda_score_perplexity(): # Test the relationship between LDA score and perplexity n_topics, X = _build_sparse_mtx() lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, random_state=0) distr = lda.fit_transform(X) perplexity_1 = lda.perplexity(X, distr, sub_sampling=False) score = lda.score(X) perplexity_2 = np.exp(-1. * (score / np.sum(X.data))) assert_almost_equal(perplexity_1, perplexity_2) def test_lda_empty_docs(): """Test LDA on empty document (all-zero rows).""" Z = np.zeros((5, 4)) for X in [Z, csr_matrix(Z)]: lda = LatentDirichletAllocation(max_iter=750).fit(X) assert_almost_equal(lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])) def test_dirichlet_expectation(): """Test Cython version of Dirichlet expectation calculation.""" x = np.logspace(-100, 10, 10000) assert_allclose(_dirichlet_expectation_1d(x), np.exp(psi(x) - psi(np.sum(x))), atol=1e-19) x = x.reshape(100, 100) assert_allclose(_dirichlet_expectation_2d(x), psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]), rtol=1e-11, atol=3e-9)
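# A hedged usage sketch, not part of the test module: it mirrors the
# block-diagonal toy corpus built by _build_sparse_mtx() above to show how the
# (older) n_topics API of LatentDirichletAllocation recovers one topic per
# three-word block. The helper name _demo_lda is hypothetical.
def _demo_lda():
    n_topics, X = _build_sparse_mtx()
    lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
                                    random_state=0)
    doc_topic = lda.fit_transform(X)
    for k, component in enumerate(lda.components_):
        top_words = component.argsort()[-3:][::-1]
        print("topic %d: top words %s" % (k, sorted(top_words)))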
bsd-3-clause
scottpurdy/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/axes.py
69
259904
from __future__ import division, generators import math, sys, warnings, datetime, new import numpy as np from numpy import ma import matplotlib rcParams = matplotlib.rcParams import matplotlib.artist as martist import matplotlib.axis as maxis import matplotlib.cbook as cbook import matplotlib.collections as mcoll import matplotlib.colors as mcolors import matplotlib.contour as mcontour import matplotlib.dates as mdates import matplotlib.font_manager as font_manager import matplotlib.image as mimage import matplotlib.legend as mlegend import matplotlib.lines as mlines import matplotlib.mlab as mlab import matplotlib.patches as mpatches import matplotlib.quiver as mquiver import matplotlib.scale as mscale import matplotlib.table as mtable import matplotlib.text as mtext import matplotlib.ticker as mticker import matplotlib.transforms as mtransforms iterable = cbook.iterable is_string_like = cbook.is_string_like def _process_plot_format(fmt): """ Process a matlab(TM) style color/line style format string. Return a (*linestyle*, *color*) tuple as a result of the processing. Default values are ('-', 'b'). Example format strings include: * 'ko': black circles * '.b': blue dots * 'r--': red dashed lines .. seealso:: :func:`~matplotlib.Line2D.lineStyles` and :func:`~matplotlib.pyplot.colors`: for all possible styles and color format string. """ linestyle = None marker = None color = None # Is fmt just a colorspec? try: color = mcolors.colorConverter.to_rgb(fmt) return linestyle, marker, color # Yes. except ValueError: pass # No, not just a color. # handle the multi char special cases and strip them from the # string if fmt.find('--')>=0: linestyle = '--' fmt = fmt.replace('--', '') if fmt.find('-.')>=0: linestyle = '-.' fmt = fmt.replace('-.', '') if fmt.find(' ')>=0: linestyle = 'None' fmt = fmt.replace(' ', '') chars = [c for c in fmt] for c in chars: if c in mlines.lineStyles: if linestyle is not None: raise ValueError( 'Illegal format string "%s"; two linestyle symbols' % fmt) linestyle = c elif c in mlines.lineMarkers: if marker is not None: raise ValueError( 'Illegal format string "%s"; two marker symbols' % fmt) marker = c elif c in mcolors.colorConverter.colors: if color is not None: raise ValueError( 'Illegal format string "%s"; two color symbols' % fmt) color = c else: raise ValueError( 'Unrecognized character %c in format string' % c) if linestyle is None and marker is None: linestyle = rcParams['lines.linestyle'] if linestyle is None: linestyle = 'None' if marker is None: marker = 'None' return linestyle, marker, color def set_default_color_cycle(clist): """ Change the default cycle of colors that will be used by the plot command. This must be called before creating the :class:`Axes` to which it will apply; it will apply to all future axes. 
*clist* is a sequence of mpl color specifiers """ _process_plot_var_args.defaultColors = clist[:] rcParams['lines.color'] = clist[0] class _process_plot_var_args: """ Process variable length arguments to the plot command, so that plot commands like the following are supported:: plot(t, s) plot(t1, s1, t2, s2) plot(t1, s1, 'ko', t2, s2) plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3) an arbitrary number of *x*, *y*, *fmt* are allowed """ defaultColors = ['b','g','r','c','m','y','k'] def __init__(self, axes, command='plot'): self.axes = axes self.command = command self._clear_color_cycle() def _clear_color_cycle(self): self.colors = _process_plot_var_args.defaultColors[:] # if the default line color is a color format string, move it up # in the que try: ind = self.colors.index(rcParams['lines.color']) except ValueError: self.firstColor = rcParams['lines.color'] else: self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def set_color_cycle(self, clist): self.colors = clist[:] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def _get_next_cycle_color(self): if self.count==0: color = self.firstColor else: color = self.colors[int(self.count % self.Ncolors)] self.count += 1 return color def __call__(self, *args, **kwargs): if self.axes.xaxis is not None and self.axes.yaxis is not None: xunits = kwargs.pop( 'xunits', self.axes.xaxis.units) yunits = kwargs.pop( 'yunits', self.axes.yaxis.units) if xunits!=self.axes.xaxis.units: self.axes.xaxis.set_units(xunits) if yunits!=self.axes.yaxis.units: self.axes.yaxis.set_units(yunits) ret = self._grab_next_args(*args, **kwargs) return ret def set_lineprops(self, line, **kwargs): assert self.command == 'plot', 'set_lineprops only works with "plot"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(line,funcName): raise TypeError, 'There is no line property "%s"'%key func = getattr(line,funcName) func(val) def set_patchprops(self, fill_poly, **kwargs): assert self.command == 'fill', 'set_patchprops only works with "fill"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(fill_poly,funcName): raise TypeError, 'There is no patch property "%s"'%key func = getattr(fill_poly,funcName) func(val) def _xy_from_y(self, y): if self.axes.yaxis is not None: b = self.axes.yaxis.update_units(y) if b: return np.arange(len(y)), y, False if not ma.isMaskedArray(y): y = np.asarray(y) if len(y.shape) == 1: y = y[:,np.newaxis] nr, nc = y.shape x = np.arange(nr) if len(x.shape) == 1: x = x[:,np.newaxis] return x,y, True def _xy_from_xy(self, x, y): if self.axes.xaxis is not None and self.axes.yaxis is not None: bx = self.axes.xaxis.update_units(x) by = self.axes.yaxis.update_units(y) # right now multicol is not supported if either x or y are # unit enabled but this can be fixed.. 
if bx or by: return x, y, False x = ma.asarray(x) y = ma.asarray(y) if len(x.shape) == 1: x = x[:,np.newaxis] if len(y.shape) == 1: y = y[:,np.newaxis] nrx, ncx = x.shape nry, ncy = y.shape assert nrx == nry, 'Dimensions of x and y are incompatible' if ncx == ncy: return x, y, True if ncx == 1: x = np.repeat(x, ncy, axis=1) if ncy == 1: y = np.repeat(y, ncx, axis=1) assert x.shape == y.shape, 'Dimensions of x and y are incompatible' return x, y, True def _plot_1_arg(self, y, **kwargs): assert self.command == 'plot', 'fill needs at least 2 arguments' ret = [] x, y, multicol = self._xy_from_y(y) if multicol: for j in xrange(y.shape[1]): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y[:,j], color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) else: color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) return ret def _plot_2_args(self, tup2, **kwargs): ret = [] if is_string_like(tup2[1]): assert self.command == 'plot', ('fill needs at least 2 non-string ' 'arguments') y, fmt = tup2 x, y, multicol = self._xy_from_y(y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) if multicol: for j in xrange(y.shape[1]): makeline(x[:,j], y[:,j]) else: makeline(x, y) return ret else: x, y = tup2 x, y, multicol = self._xy_from_xy(x, y) def makeline(x, y): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) facecolor = self._get_next_cycle_color() seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _plot_3_args(self, tup3, **kwargs): ret = [] x, y, fmt = tup3 x, y, multicol = self._xy_from_xy(x, y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): facecolor = color x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _grab_next_args(self, *args, **kwargs): remaining = args while 1: if len(remaining)==0: return if len(remaining)==1: for seg in self._plot_1_arg(remaining[0], **kwargs): yield seg remaining = [] continue if len(remaining)==2: for seg in self._plot_2_args(remaining, **kwargs): yield seg remaining = [] continue if len(remaining)==3: if not is_string_like(remaining[2]): raise ValueError, 'third arg must be a format string' for 
seg in self._plot_3_args(remaining, **kwargs): yield seg remaining=[] continue if is_string_like(remaining[2]): for seg in self._plot_3_args(remaining[:3], **kwargs): yield seg remaining=remaining[3:] else: for seg in self._plot_2_args(remaining[:2], **kwargs): yield seg remaining=remaining[2:] class Axes(martist.Artist): """ The :class:`Axes` contains most of the figure elements: :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`, :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`, :class:`~matplotlib.patches.Polygon`, etc., and sets the coordinate system. The :class:`Axes` instance supports callbacks through a callbacks attribute which is a :class:`~matplotlib.cbook.CallbackRegistry` instance. The events you can connect to are 'xlim_changed' and 'ylim_changed' and the callback will be called with func(*ax*) where *ax* is the :class:`Axes` instance. """ name = "rectilinear" _shared_x_axes = cbook.Grouper() _shared_y_axes = cbook.Grouper() def __str__(self): return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds) def __init__(self, fig, rect, axisbg = None, # defaults to rc axes.facecolor frameon = True, sharex=None, # use Axes instance's xaxis info sharey=None, # use Axes instance's yaxis info label='', **kwargs ): """ Build an :class:`Axes` instance in :class:`~matplotlib.figure.Figure` *fig* with *rect=[left, bottom, width, height]* in :class:`~matplotlib.figure.Figure` coordinates Optional keyword arguments: ================ ========================================= Keyword Description ================ ========================================= *adjustable* [ 'box' | 'datalim' ] *alpha* float: the alpha transparency *anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W' ] *aspect* [ 'auto' | 'equal' | aspect_ratio ] *autoscale_on* [ *True* | *False* ] whether or not to autoscale the *viewlim* *axis_bgcolor* any matplotlib color, see :func:`~matplotlib.pyplot.colors` *axisbelow* draw the grids and ticks below the other artists *cursor_props* a (*float*, *color*) tuple *figure* a :class:`~matplotlib.figure.Figure` instance *frame_on* a boolean - draw the axes frame *label* the axes label *navigate* [ *True* | *False* ] *navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation toolbar button status *position* [left, bottom, width, height] in class:`~matplotlib.figure.Figure` coords *sharex* an class:`~matplotlib.axes.Axes` instance to share the x-axis with *sharey* an class:`~matplotlib.axes.Axes` instance to share the y-axis with *title* the title string *visible* [ *True* | *False* ] whether the axes is visible *xlabel* the xlabel *xlim* (*xmin*, *xmax*) view limits *xscale* [%(scale)s] *xticklabels* sequence of strings *xticks* sequence of floats *ylabel* the ylabel strings *ylim* (*ymin*, *ymax*) view limits *yscale* [%(scale)s] *yticklabels* sequence of strings *yticks* sequence of floats ================ ========================================= """ % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])} martist.Artist.__init__(self) if isinstance(rect, mtransforms.Bbox): self._position = rect else: self._position = mtransforms.Bbox.from_bounds(*rect) self._originalPosition = self._position.frozen() self.set_axes(self) self.set_aspect('auto') self._adjustable = 'box' self.set_anchor('C') self._sharex = sharex self._sharey = sharey if sharex is not None: self._shared_x_axes.join(self, sharex) if sharex._adjustable == 'box': sharex._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') 
self._adjustable = 'datalim' if sharey is not None: self._shared_y_axes.join(self, sharey) if sharey._adjustable == 'box': sharey._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') self._adjustable = 'datalim' self.set_label(label) self.set_figure(fig) # this call may differ for non-sep axes, eg polar self._init_axis() if axisbg is None: axisbg = rcParams['axes.facecolor'] self._axisbg = axisbg self._frameon = frameon self._axisbelow = rcParams['axes.axisbelow'] self._hold = rcParams['axes.hold'] self._connected = {} # a dict from events to (id, func) self.cla() # funcs used to format x and y - fall back on major formatters self.fmt_xdata = None self.fmt_ydata = None self.set_cursor_props((1,'k')) # set the cursor properties for axes self._cachedRenderer = None self.set_navigate(True) self.set_navigate_mode(None) if len(kwargs): martist.setp(self, **kwargs) if self.xaxis is not None: self._xcid = self.xaxis.callbacks.connect('units finalize', self.relim) if self.yaxis is not None: self._ycid = self.yaxis.callbacks.connect('units finalize', self.relim) def get_window_extent(self, *args, **kwargs): ''' get the axes bounding box in display space; *args* and *kwargs* are empty ''' return self.bbox def _init_axis(self): "move this out of __init__ because non-separable axes don't use it" self.xaxis = maxis.XAxis(self) self.yaxis = maxis.YAxis(self) self._update_transScale() def set_figure(self, fig): """ Set the class:`~matplotlib.axes.Axes` figure accepts a class:`~matplotlib.figure.Figure` instance """ martist.Artist.set_figure(self, fig) self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure) #these will be updated later as data is added self.dataLim = mtransforms.Bbox.unit() self.viewLim = mtransforms.Bbox.unit() self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) self._set_lim_and_transforms() def _set_lim_and_transforms(self): """ set the *dataLim* and *viewLim* :class:`~matplotlib.transforms.Bbox` attributes and the *transScale*, *transData*, *transLimits* and *transAxes* transformations. """ self.transAxes = mtransforms.BboxTransformTo(self.bbox) # Transforms the x and y axis separately by a scale factor # It is assumed that this part will have non-linear components self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) # An affine transformation on the data, generally to limit the # range of the axes self.transLimits = mtransforms.BboxTransformFrom( mtransforms.TransformedBbox(self.viewLim, self.transScale)) # The parentheses are important for efficiency here -- they # group the last two (which are usually affines) separately # from the first (which, with log-scaling can be non-affine). self.transData = self.transScale + (self.transLimits + self.transAxes) self._xaxis_transform = mtransforms.blended_transform_factory( self.axes.transData, self.axes.transAxes) self._yaxis_transform = mtransforms.blended_transform_factory( self.axes.transAxes, self.axes.transData) def get_xaxis_transform(self): """ Get the transformation used for drawing x-axis labels, ticks and gridlines. The x-direction is in data coordinates and the y-direction is in axis coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return self._xaxis_transform def get_xaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0, self.figure.dpi_scale_trans), "top", "center") def get_xaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, pad_points / 72.0, self.figure.dpi_scale_trans), "bottom", "center") def get_yaxis_transform(self): """ Get the transformation used for drawing y-axis labels, ticks and gridlines. The x-direction is in axis coordinates and the y-direction is in data coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return self._yaxis_transform def get_yaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._yaxis_transform + mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "right") def get_yaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return (self._yaxis_transform + mtransforms.ScaledTranslation(pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "left") def _update_transScale(self): self.transScale.set( mtransforms.blended_transform_factory( self.xaxis.get_transform(), self.yaxis.get_transform())) if hasattr(self, "lines"): for line in self.lines: line._transformed_path.invalidate() def get_position(self, original=False): 'Return the a copy of the axes rectangle as a Bbox' if original: return self._originalPosition.frozen() else: return self._position.frozen() def set_position(self, pos, which='both'): """ Set the axes position with:: pos = [left, bottom, width, height] in relative 0,1 coords, or *pos* can be a :class:`~matplotlib.transforms.Bbox` There are two position variables: one which is ultimately used, but which may be modified by :meth:`apply_aspect`, and a second which is the starting point for :meth:`apply_aspect`. Optional keyword arguments: *which* ========== ==================== value description ========== ==================== 'active' to change the first 'original' to change the second 'both' to change both ========== ==================== """ if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) if which in ('both', 'active'): self._position.set(pos) if which in ('both', 'original'): self._originalPosition.set(pos) def reset_position(self): 'Make the original position the active position' pos = self.get_position(original=True) self.set_position(pos, which='active') def _set_artist_props(self, a): 'set the boilerplate props for artists added to axes' a.set_figure(self.figure) if not a.is_transform_set(): a.set_transform(self.transData) a.set_axes(self) def _gen_axes_patch(self): """ Returns the patch used to draw the background of the axes. It is also used as the clipping path for any data elements on the axes. In the standard axes, this is a rectangle, but in other projections it may not be. .. note:: Intended to be overridden by new projection types. """ return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0) def cla(self): 'Clear the current axes' # Note: this is called by Axes.__init__() self.xaxis.cla() self.yaxis.cla() self.ignore_existing_data_limits = True self.callbacks = cbook.CallbackRegistry(('xlim_changed', 'ylim_changed')) if self._sharex is not None: # major and minor are class instances with # locator and formatter attributes self.xaxis.major = self._sharex.xaxis.major self.xaxis.minor = self._sharex.xaxis.minor x0, x1 = self._sharex.get_xlim() self.set_xlim(x0, x1, emit=False) self.xaxis.set_scale(self._sharex.xaxis.get_scale()) else: self.xaxis.set_scale('linear') if self._sharey is not None: self.yaxis.major = self._sharey.yaxis.major self.yaxis.minor = self._sharey.yaxis.minor y0, y1 = self._sharey.get_ylim() self.set_ylim(y0, y1, emit=False) self.yaxis.set_scale(self._sharey.yaxis.get_scale()) else: self.yaxis.set_scale('linear') self._autoscaleon = True self._update_transScale() # needed? 
self._get_lines = _process_plot_var_args(self) self._get_patches_for_fill = _process_plot_var_args(self, 'fill') self._gridOn = rcParams['axes.grid'] self.lines = [] self.patches = [] self.texts = [] self.tables = [] self.artists = [] self.images = [] self.legend_ = None self.collections = [] # collection.Collection instances self.grid(self._gridOn) props = font_manager.FontProperties(size=rcParams['axes.titlesize']) self.titleOffsetTrans = mtransforms.ScaledTranslation( 0.0, 5.0 / 72.0, self.figure.dpi_scale_trans) self.title = mtext.Text( x=0.5, y=1.0, text='', fontproperties=props, verticalalignment='bottom', horizontalalignment='center', ) self.title.set_transform(self.transAxes + self.titleOffsetTrans) self.title.set_clip_box(None) self._set_artist_props(self.title) # the patch draws the background of the axes. we want this to # be below the other artists; the axesPatch name is # deprecated. We use the frame to draw the edges so we are # setting the edgecolor to None self.patch = self.axesPatch = self._gen_axes_patch() self.patch.set_figure(self.figure) self.patch.set_facecolor(self._axisbg) self.patch.set_edgecolor('None') self.patch.set_linewidth(0) self.patch.set_transform(self.transAxes) # the frame draws the border around the axes and we want this # above. this is a place holder for a more sophisticated # artist that might just draw a left, bottom frame, or a # centered frame, etc the axesFrame name is deprecated self.frame = self.axesFrame = self._gen_axes_patch() self.frame.set_figure(self.figure) self.frame.set_facecolor('none') self.frame.set_edgecolor(rcParams['axes.edgecolor']) self.frame.set_linewidth(rcParams['axes.linewidth']) self.frame.set_transform(self.transAxes) self.frame.set_zorder(2.5) self.axison = True self.xaxis.set_clip_path(self.patch) self.yaxis.set_clip_path(self.patch) self._shared_x_axes.clean() self._shared_y_axes.clean() def clear(self): 'clear the axes' self.cla() def set_color_cycle(self, clist): """ Set the color cycle for any future plot commands on this Axes. clist is a list of mpl color specifiers. """ self._get_lines.set_color_cycle(clist) def ishold(self): 'return the HOLD status of the axes' return self._hold def hold(self, b=None): """ call signature:: hold(b=None) Set the hold state. If *hold* is *None* (default), toggle the *hold* state. Else set the *hold* state to boolean value *b*. Examples: * toggle hold: >>> hold() * turn hold on: >>> hold(True) * turn hold off >>> hold(False) When hold is True, subsequent plot commands will be added to the current axes. When hold is False, the current axes and figure will be cleared on the next plot command """ if b is None: self._hold = not self._hold else: self._hold = b def get_aspect(self): return self._aspect def set_aspect(self, aspect, adjustable=None, anchor=None): """ *aspect* ======== ================================================ value description ======== ================================================ 'auto' automatic; fill position rectangle with data 'normal' same as 'auto'; deprecated 'equal' same scaling from data to plot units for x and y num a circle will be stretched such that the height is num times the width. aspect=1 is the same as aspect='equal'. 
======== ================================================ *adjustable* ========= ============================ value description ========= ============================ 'box' change physical size of axes 'datalim' change xlim or ylim ========= ============================ *anchor* ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== """ if aspect in ('normal', 'auto'): self._aspect = 'auto' elif aspect == 'equal': self._aspect = 'equal' else: self._aspect = float(aspect) # raise ValueError if necessary if adjustable is not None: self.set_adjustable(adjustable) if anchor is not None: self.set_anchor(anchor) def get_adjustable(self): return self._adjustable def set_adjustable(self, adjustable): """ ACCEPTS: [ 'box' | 'datalim' ] """ if adjustable in ('box', 'datalim'): if self in self._shared_x_axes or self in self._shared_y_axes: if adjustable == 'box': raise ValueError( 'adjustable must be "datalim" for shared axes') self._adjustable = adjustable else: raise ValueError('argument must be "box", or "datalim"') def get_anchor(self): return self._anchor def set_anchor(self, anchor): """ *anchor* ===== ============ value description ===== ============ 'C' Center 'SW' bottom left 'S' bottom 'SE' bottom right 'E' right 'NE' top right 'N' top 'NW' top left 'W' left ===== ============ """ if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2: self._anchor = anchor else: raise ValueError('argument must be among %s' % ', '.join(mtransforms.BBox.coefs.keys())) def get_data_ratio(self): """ Returns the aspect ratio of the raw data. This method is intended to be overridden by new projection types. """ xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) return ysize/xsize def apply_aspect(self, position=None): ''' Use :meth:`_aspect` and :meth:`_adjustable` to modify the axes box or the view limits. ''' if position is None: position = self.get_position(original=True) aspect = self.get_aspect() if aspect == 'auto': self.set_position( position , which='active') return if aspect == 'equal': A = 1 else: A = aspect #Ensure at drawing time that any Axes involved in axis-sharing # does not have its position changed. 
if self in self._shared_x_axes or self in self._shared_y_axes: if self._adjustable == 'box': self._adjustable = 'datalim' warnings.warn( 'shared axes: "adjustable" is being changed to "datalim"') figW,figH = self.get_figure().get_size_inches() fig_aspect = figH/figW if self._adjustable == 'box': box_aspect = A * self.get_data_ratio() pb = position.frozen() pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect) self.set_position(pb1.anchored(self.get_anchor(), pb), 'active') return # reset active to original in case it had been changed # by prior use of 'box' self.set_position(position, which='active') xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) l,b,w,h = position.bounds box_aspect = fig_aspect * (h/w) data_ratio = box_aspect / A y_expander = (data_ratio*xsize/ysize - 1.0) #print 'y_expander', y_expander # If y_expander > 0, the dy/dx viewLim ratio needs to increase if abs(y_expander) < 0.005: #print 'good enough already' return dL = self.dataLim xr = 1.05 * dL.width yr = 1.05 * dL.height xmarg = xsize - xr ymarg = ysize - yr Ysize = data_ratio * xsize Xsize = ysize / data_ratio Xmarg = Xsize - xr Ymarg = Ysize - yr xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help. ym = 0 #print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax #print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize changex = (self in self._shared_y_axes and self not in self._shared_x_axes) changey = (self in self._shared_x_axes and self not in self._shared_y_axes) if changex and changey: warnings.warn("adjustable='datalim' cannot work with shared " "x and y axes") return if changex: adjust_y = False else: #print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg if xmarg > xm and ymarg > ym: adjy = ((Ymarg > 0 and y_expander < 0) or (Xmarg < 0 and y_expander > 0)) else: adjy = y_expander > 0 #print 'y_expander, adjy', y_expander, adjy adjust_y = changey or adjy #(Ymarg > xmarg) if adjust_y: yc = 0.5*(ymin+ymax) y0 = yc - Ysize/2.0 y1 = yc + Ysize/2.0 self.set_ybound((y0, y1)) #print 'New y0, y1:', y0, y1 #print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize else: xc = 0.5*(xmin+xmax) x0 = xc - Xsize/2.0 x1 = xc + Xsize/2.0 self.set_xbound((x0, x1)) #print 'New x0, x1:', x0, x1 #print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0) def axis(self, *v, **kwargs): ''' Convenience method for manipulating the x and y view limits and the aspect ratio of the plot. *kwargs* are passed on to :meth:`set_xlim` and :meth:`set_ylim` ''' if len(v)==1 and is_string_like(v[0]): s = v[0].lower() if s=='on': self.set_axis_on() elif s=='off': self.set_axis_off() elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'): self.set_autoscale_on(True) self.set_aspect('auto') self.autoscale_view() # self.apply_aspect() if s=='equal': self.set_aspect('equal', adjustable='datalim') elif s == 'scaled': self.set_aspect('equal', adjustable='box', anchor='C') self.set_autoscale_on(False) # Req. 
by Mark Bakker elif s=='tight': self.autoscale_view(tight=True) self.set_autoscale_on(False) elif s == 'image': self.autoscale_view(tight=True) self.set_autoscale_on(False) self.set_aspect('equal', adjustable='box', anchor='C') else: raise ValueError('Unrecognized string %s to axis; ' 'try on or off' % s) xmin, xmax = self.get_xlim() ymin, ymax = self.get_ylim() return xmin, xmax, ymin, ymax try: v[0] except IndexError: emit = kwargs.get('emit', True) xmin = kwargs.get('xmin', None) xmax = kwargs.get('xmax', None) xmin, xmax = self.set_xlim(xmin, xmax, emit) ymin = kwargs.get('ymin', None) ymax = kwargs.get('ymax', None) ymin, ymax = self.set_ylim(ymin, ymax, emit) return xmin, xmax, ymin, ymax v = v[0] if len(v) != 4: raise ValueError('v must contain [xmin xmax ymin ymax]') self.set_xlim([v[0], v[1]]) self.set_ylim([v[2], v[3]]) return v def get_child_artists(self): """ Return a list of artists the axes contains. .. deprecated:: 0.98 """ raise DeprecationWarning('Use get_children instead') def get_frame(self): 'Return the axes Rectangle frame' warnings.warn('use ax.patch instead', DeprecationWarning) return self.patch def get_legend(self): 'Return the legend.Legend instance, or None if no legend is defined' return self.legend_ def get_images(self): 'return a list of Axes images contained by the Axes' return cbook.silent_list('AxesImage', self.images) def get_lines(self): 'Return a list of lines contained by the Axes' return cbook.silent_list('Line2D', self.lines) def get_xaxis(self): 'Return the XAxis instance' return self.xaxis def get_xgridlines(self): 'Get the x grid lines as a list of Line2D instances' return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines()) def get_xticklines(self): 'Get the xtick lines as a list of Line2D instances' return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines()) def get_yaxis(self): 'Return the YAxis instance' return self.yaxis def get_ygridlines(self): 'Get the y grid lines as a list of Line2D instances' return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines()) def get_yticklines(self): 'Get the ytick lines as a list of Line2D instances' return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines()) #### Adding and tracking artists def has_data(self): '''Return *True* if any artists have been added to axes. This should not be used to determine whether the *dataLim* need to be updated, and may not actually be useful for anything. 
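
        For example, assuming a freshly created Axes instance ``ax``:

        >>> ax.has_data()
        False
        >>> lines = ax.plot([0, 1], [0, 1])
        >>> ax.has_data()
        True
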
''' return ( len(self.collections) + len(self.images) + len(self.lines) + len(self.patches))>0 def add_artist(self, a): 'Add any :class:`~matplotlib.artist.Artist` to the axes' a.set_axes(self) self.artists.append(a) self._set_artist_props(a) a.set_clip_path(self.patch) a._remove_method = lambda h: self.artists.remove(h) def add_collection(self, collection, autolim=True): ''' add a :class:`~matplotlib.collections.Collection` instance to the axes ''' label = collection.get_label() if not label: collection.set_label('collection%d'%len(self.collections)) self.collections.append(collection) self._set_artist_props(collection) collection.set_clip_path(self.patch) if autolim: if collection._paths and len(collection._paths): self.update_datalim(collection.get_datalim(self.transData)) collection._remove_method = lambda h: self.collections.remove(h) def add_line(self, line): ''' Add a :class:`~matplotlib.lines.Line2D` to the list of plot lines ''' self._set_artist_props(line) line.set_clip_path(self.patch) self._update_line_limits(line) if not line.get_label(): line.set_label('_line%d'%len(self.lines)) self.lines.append(line) line._remove_method = lambda h: self.lines.remove(h) def _update_line_limits(self, line): p = line.get_path() if p.vertices.size > 0: self.dataLim.update_from_path(p, self.ignore_existing_data_limits, updatex=line.x_isdata, updatey=line.y_isdata) self.ignore_existing_data_limits = False def add_patch(self, p): """ Add a :class:`~matplotlib.patches.Patch` *p* to the list of axes patches; the clipbox will be set to the Axes clipping box. If the transform is not set, it will be set to :attr:`transData`. """ self._set_artist_props(p) p.set_clip_path(self.patch) self._update_patch_limits(p) self.patches.append(p) p._remove_method = lambda h: self.patches.remove(h) def _update_patch_limits(self, patch): 'update the data limits for patch *p*' # hist can add zero height Rectangles, which is useful to keep # the bins, counts and patches lined up, but it throws off log # scaling. We'll ignore rects with zero height or width in # the auto-scaling if (isinstance(patch, mpatches.Rectangle) and (patch.get_width()==0 or patch.get_height()==0)): return vertices = patch.get_path().vertices if vertices.size > 0: xys = patch.get_patch_transform().transform(vertices) if patch.get_data_transform() != self.transData: transform = (patch.get_data_transform() + self.transData.inverted()) xys = transform.transform(xys) self.update_datalim(xys, updatex=patch.x_isdata, updatey=patch.y_isdata) def add_table(self, tab): ''' Add a :class:`~matplotlib.tables.Table` instance to the list of axes tables ''' self._set_artist_props(tab) self.tables.append(tab) tab.set_clip_path(self.patch) tab._remove_method = lambda h: self.tables.remove(h) def relim(self): 'recompute the data limits based on current artists' # Collections are deliberately not supported (yet); see # the TODO note in artists.py. self.dataLim.ignore(True) self.ignore_existing_data_limits = True for line in self.lines: self._update_line_limits(line) for p in self.patches: self._update_patch_limits(p) def update_datalim(self, xys, updatex=True, updatey=True): 'Update the data lim bbox with seq of xy tups or equiv. 2-D array' # if no data is set currently, the bbox will ignore its # limits and set the bound to be the bounds of the xydata. 
# Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(xys) and not len(xys): return if not ma.isMaskedArray(xys): xys = np.asarray(xys) self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits, updatex=updatex, updatey=updatey) self.ignore_existing_data_limits = False def update_datalim_numerix(self, x, y): 'Update the data lim bbox with seq of xy tups' # if no data is set currently, the bbox will ignore it's # limits and set the bound to be the bounds of the xydata. # Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(x) and not len(x): return self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits) self.ignore_existing_data_limits = False def update_datalim_bounds(self, bounds): ''' Update the datalim to include the given :class:`~matplotlib.transforms.Bbox` *bounds* ''' self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds])) def _process_unit_info(self, xdata=None, ydata=None, kwargs=None): 'look for unit *kwargs* and update the axis instances as necessary' if self.xaxis is None or self.yaxis is None: return #print 'processing', self.get_geometry() if xdata is not None: # we only need to update if there is nothing set yet. if not self.xaxis.have_units(): self.xaxis.update_units(xdata) #print '\tset from xdata', self.xaxis.units if ydata is not None: # we only need to update if there is nothing set yet. if not self.yaxis.have_units(): self.yaxis.update_units(ydata) #print '\tset from ydata', self.yaxis.units # process kwargs 2nd since these will override default units if kwargs is not None: xunits = kwargs.pop( 'xunits', self.xaxis.units) if xunits!=self.xaxis.units: #print '\tkw setting xunits', xunits self.xaxis.set_units(xunits) # If the units being set imply a different converter, # we need to update. if xdata is not None: self.xaxis.update_units(xdata) yunits = kwargs.pop('yunits', self.yaxis.units) if yunits!=self.yaxis.units: #print '\tkw setting yunits', yunits self.yaxis.set_units(yunits) # If the units being set imply a different converter, # we need to update. if ydata is not None: self.yaxis.update_units(ydata) def in_axes(self, mouseevent): ''' return *True* if the given *mouseevent* (in display coords) is in the Axes ''' return self.patch.contains(mouseevent)[0] def get_autoscale_on(self): """ Get whether autoscaling is applied on plot commands """ return self._autoscaleon def set_autoscale_on(self, b): """ Set whether autoscaling is applied on plot commands accepts: [ *True* | *False* ] """ self._autoscaleon = b def autoscale_view(self, tight=False, scalex=True, scaley=True): """ autoscale the view limits using the data limits. You can selectively autoscale only a single axis, eg, the xaxis by setting *scaley* to *False*. The autoscaling preserves any axis direction reversal that has already been done. 
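
        For example, assuming an Axes instance ``ax`` that already has data
        plotted on it, the x limits can be recomputed from the data while
        the current y limits are left untouched:

        >>> ax.autoscale_view(scalex=True, scaley=False)
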
""" # if image data only just use the datalim if not self._autoscaleon: return if scalex: xshared = self._shared_x_axes.get_siblings(self) dl = [ax.dataLim for ax in xshared] bb = mtransforms.BboxBase.union(dl) x0, x1 = bb.intervalx if scaley: yshared = self._shared_y_axes.get_siblings(self) dl = [ax.dataLim for ax in yshared] bb = mtransforms.BboxBase.union(dl) y0, y1 = bb.intervaly if (tight or (len(self.images)>0 and len(self.lines)==0 and len(self.patches)==0)): if scalex: self.set_xbound(x0, x1) if scaley: self.set_ybound(y0, y1) return if scalex: XL = self.xaxis.get_major_locator().view_limits(x0, x1) self.set_xbound(XL) if scaley: YL = self.yaxis.get_major_locator().view_limits(y0, y1) self.set_ybound(YL) #### Drawing def draw(self, renderer=None, inframe=False): "Draw everything (plot lines, axes, labels)" if renderer is None: renderer = self._cachedRenderer if renderer is None: raise RuntimeError('No renderer defined') if not self.get_visible(): return renderer.open_group('axes') self.apply_aspect() # the patch draws the background rectangle -- the frame below # will draw the edges if self.axison and self._frameon: self.patch.draw(renderer) artists = [] if len(self.images)<=1 or renderer.option_image_nocomposite(): for im in self.images: im.draw(renderer) else: # make a composite image blending alpha # list of (mimage.Image, ox, oy) mag = renderer.get_image_magnification() ims = [(im.make_image(mag),0,0) for im in self.images if im.get_visible()] l, b, r, t = self.bbox.extents width = mag*((round(r) + 0.5) - (round(l) - 0.5)) height = mag*((round(t) + 0.5) - (round(b) - 0.5)) im = mimage.from_images(height, width, ims) im.is_grayscale = False l, b, w, h = self.bbox.bounds # composite images need special args so they will not # respect z-order for now renderer.draw_image( round(l), round(b), im, self.bbox, self.patch.get_path(), self.patch.get_transform()) artists.extend(self.collections) artists.extend(self.patches) artists.extend(self.lines) artists.extend(self.texts) artists.extend(self.artists) if self.axison and not inframe: if self._axisbelow: self.xaxis.set_zorder(0.5) self.yaxis.set_zorder(0.5) else: self.xaxis.set_zorder(2.5) self.yaxis.set_zorder(2.5) artists.extend([self.xaxis, self.yaxis]) if not inframe: artists.append(self.title) artists.extend(self.tables) if self.legend_ is not None: artists.append(self.legend_) # the frame draws the edges around the axes patch -- we # decouple these so the patch can be in the background and the # frame in the foreground. if self.axison and self._frameon: artists.append(self.frame) dsu = [ (a.zorder, i, a) for i, a in enumerate(artists) if not a.get_animated() ] dsu.sort() for zorder, i, a in dsu: a.draw(renderer) renderer.close_group('axes') self._cachedRenderer = renderer def draw_artist(self, a): """ This method can only be used after an initial draw which caches the renderer. It is used to efficiently update Axes data (axis ticks, labels, etc are not updated) """ assert self._cachedRenderer is not None a.draw(self._cachedRenderer) def redraw_in_frame(self): """ This method can only be used after an initial draw which caches the renderer. 
It is used to efficiently update Axes data (axis ticks, labels, etc are not updated) """ assert self._cachedRenderer is not None self.draw(self._cachedRenderer, inframe=True) def get_renderer_cache(self): return self._cachedRenderer def __draw_animate(self): # ignore for now; broken if self._lastRenderer is None: raise RuntimeError('You must first call ax.draw()') dsu = [(a.zorder, a) for a in self.animated.keys()] dsu.sort() renderer = self._lastRenderer renderer.blit() for tmp, a in dsu: a.draw(renderer) #### Axes rectangle characteristics def get_frame_on(self): """ Get whether the axes rectangle patch is drawn """ return self._frameon def set_frame_on(self, b): """ Set whether the axes rectangle patch is drawn ACCEPTS: [ *True* | *False* ] """ self._frameon = b def get_axisbelow(self): """ Get whether axis below is true or not """ return self._axisbelow def set_axisbelow(self, b): """ Set whether the axis ticks and gridlines are above or below most artists ACCEPTS: [ *True* | *False* ] """ self._axisbelow = b def grid(self, b=None, **kwargs): """ call signature:: grid(self, b=None, **kwargs) Set the axes grids on or off; *b* is a boolean If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If *kwargs* are supplied, it is assumed that you want a grid and *b* is thus set to *True* *kawrgs* are used to set the grid line properties, eg:: ax.grid(color='r', linestyle='-', linewidth=2) Valid :class:`~matplotlib.lines.Line2D` kwargs are %(Line2D)s """ if len(kwargs): b = True self.xaxis.grid(b, **kwargs) self.yaxis.grid(b, **kwargs) grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd def ticklabel_format(self, **kwargs): """ Convenience method for manipulating the ScalarFormatter used by default for linear axes. Optional keyword arguments: ============ ===================================== Keyword Description ============ ===================================== *style* [ 'sci' (or 'scientific') | 'plain' ] plain turns off scientific notation *scilimits* (m, n), pair of integers; if *style* is 'sci', scientific notation will be used for numbers outside the range 10`-m`:sup: to 10`n`:sup:. Use (0,0) to include all numbers. *axis* [ 'x' | 'y' | 'both' ] ============ ===================================== Only the major ticks are affected. If the method is called when the :class:`~matplotlib.ticker.ScalarFormatter` is not the :class:`~matplotlib.ticker.Formatter` being used, an :exc:`AttributeError` will be raised. 
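
        For example, assuming an Axes instance ``ax`` whose axes use the
        default :class:`~matplotlib.ticker.ScalarFormatter`, scientific
        notation can be forced for all y tick labels:

        >>> ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
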
""" style = kwargs.pop('style', '').lower() scilimits = kwargs.pop('scilimits', None) if scilimits is not None: try: m, n = scilimits m+n+1 # check that both are numbers except (ValueError, TypeError): raise ValueError("scilimits must be a sequence of 2 integers") axis = kwargs.pop('axis', 'both').lower() if style[:3] == 'sci': sb = True elif style in ['plain', 'comma']: sb = False if style == 'plain': cb = False else: cb = True raise NotImplementedError, "comma style remains to be added" elif style == '': sb = None else: raise ValueError, "%s is not a valid style value" try: if sb is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_scientific(sb) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_scientific(sb) if scilimits is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_powerlimits(scilimits) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_powerlimits(scilimits) except AttributeError: raise AttributeError( "This method only works with the ScalarFormatter.") def set_axis_off(self): """turn off the axis""" self.axison = False def set_axis_on(self): """turn on the axis""" self.axison = True def get_axis_bgcolor(self): 'Return the axis background color' return self._axisbg def set_axis_bgcolor(self, color): """ set the axes background color ACCEPTS: any matplotlib color - see :func:`~matplotlib.pyplot.colors` """ self._axisbg = color self.patch.set_facecolor(color) ### data limits, ticks, tick labels, and formatting def invert_xaxis(self): "Invert the x-axis." left, right = self.get_xlim() self.set_xlim(right, left) def xaxis_inverted(self): 'Returns True if the x-axis is inverted.' left, right = self.get_xlim() return right < left def get_xbound(self): """ Returns the x-axis numerical bounds where:: lowerBound < upperBound """ left, right = self.get_xlim() if left < right: return left, right else: return right, left def set_xbound(self, lower=None, upper=None): """ Set the lower and upper numerical bounds of the x-axis. This method will honor axes inversion regardless of parameter order. 
""" if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_xbound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.xaxis_inverted(): if lower < upper: self.set_xlim(upper, lower) else: self.set_xlim(lower, upper) else: if lower < upper: self.set_xlim(lower, upper) else: self.set_xlim(upper, lower) def get_xlim(self): """ Get the x-axis range [*xmin*, *xmax*] """ return tuple(self.viewLim.intervalx) def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs): """ call signature:: set_xlim(self, *args, **kwargs) Set the limits for the xaxis Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*] Examples:: set_xlim((valmin, valmax)) set_xlim(valmin, valmax) set_xlim(xmin=1) # xmax unchanged set_xlim(xmax=1) # xmin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change ACCEPTS: len(2) sequence of floats """ if xmax is None and iterable(xmin): xmin,xmax = xmin self._process_unit_info(xdata=(xmin, xmax)) if xmin is not None: xmin = self.convert_xunits(xmin) if xmax is not None: xmax = self.convert_xunits(xmax) old_xmin,old_xmax = self.get_xlim() if xmin is None: xmin = old_xmin if xmax is None: xmax = old_xmax xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False) xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax) self.viewLim.intervalx = (xmin, xmax) if emit: self.callbacks.process('xlim_changed', self) # Call all of the other x-axes that are shared with this one for other in self._shared_x_axes.get_siblings(self): if other is not self: other.set_xlim(self.viewLim.intervalx, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return xmin, xmax def get_xscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.xaxis.get_scale() def set_xscale(self, value, **kwargs): """ call signature:: set_xscale(value) Set the scaling of the x-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.xaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_xticks(self, minor=False): 'Return the x ticks as a list of locations' return self.xaxis.get_ticklocs(minor=minor) def set_xticks(self, ticks, minor=False): """ Set the x ticks with list of *ticks* ACCEPTS: sequence of floats """ return self.xaxis.set_ticks(ticks, minor=minor) def get_xmajorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_majorticklabels()) def get_xminorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_minorticklabels()) def get_xticklabels(self, minor=False): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_ticklabels(minor=minor)) def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs): """ call signature:: set_xticklabels(labels, fontdict=None, minor=False, **kwargs) Set the xtick labels with list of strings *labels*. Return a list of axis text instances. *kwargs* set the :class:`~matplotlib.text.Text` properties. 
Valid properties are %(Text)s ACCEPTS: sequence of strings """ return self.xaxis.set_ticklabels(labels, fontdict, minor=minor, **kwargs) set_xticklabels.__doc__ = cbook.dedent( set_xticklabels.__doc__) % martist.kwdocd def invert_yaxis(self): "Invert the y-axis." left, right = self.get_ylim() self.set_ylim(right, left) def yaxis_inverted(self): 'Returns True if the y-axis is inverted.' left, right = self.get_ylim() return right < left def get_ybound(self): "Return y-axis numerical bounds in the form of lowerBound < upperBound" left, right = self.get_ylim() if left < right: return left, right else: return right, left def set_ybound(self, lower=None, upper=None): """Set the lower and upper numerical bounds of the y-axis. This method will honor axes inversion regardless of parameter order. """ if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_ybound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.yaxis_inverted(): if lower < upper: self.set_ylim(upper, lower) else: self.set_ylim(lower, upper) else: if lower < upper: self.set_ylim(lower, upper) else: self.set_ylim(upper, lower) def get_ylim(self): """ Get the y-axis range [*ymin*, *ymax*] """ return tuple(self.viewLim.intervaly) def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs): """ call signature:: set_ylim(self, *args, **kwargs): Set the limits for the yaxis; v = [ymin, ymax]:: set_ylim((valmin, valmax)) set_ylim(valmin, valmax) set_ylim(ymin=1) # ymax unchanged set_ylim(ymax=1) # ymin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change Returns the current ylimits as a length 2 tuple ACCEPTS: len(2) sequence of floats """ if ymax is None and iterable(ymin): ymin,ymax = ymin if ymin is not None: ymin = self.convert_yunits(ymin) if ymax is not None: ymax = self.convert_yunits(ymax) old_ymin,old_ymax = self.get_ylim() if ymin is None: ymin = old_ymin if ymax is None: ymax = old_ymax ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False) ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax) self.viewLim.intervaly = (ymin, ymax) if emit: self.callbacks.process('ylim_changed', self) # Call all of the other y-axes that are shared with this one for other in self._shared_y_axes.get_siblings(self): if other is not self: other.set_ylim(self.viewLim.intervaly, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return ymin, ymax def get_yscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.yaxis.get_scale() def set_yscale(self, value, **kwargs): """ call signature:: set_yscale(value) Set the scaling of the y-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.yaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_yticks(self, minor=False): 'Return the y ticks as a list of locations' return self.yaxis.get_ticklocs(minor=minor) def set_yticks(self, ticks, minor=False): """ Set the y ticks with list of *ticks* ACCEPTS: sequence of floats Keyword arguments: *minor*: [ False | True ] Sets the minor ticks if True """ return self.yaxis.set_ticks(ticks, minor=minor) def 
get_ymajorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_majorticklabels())

    def get_yminorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_minorticklabels())

    def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_ticklabels(minor=minor))

    def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
        """
        call signature::

          set_yticklabels(labels, fontdict=None, minor=False, **kwargs)

        Set the ytick labels with list of strings *labels*.  Return a list of
        :class:`~matplotlib.text.Text` instances.

        *kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
        Valid properties are
        %(Text)s

        ACCEPTS: sequence of strings
        """
        return self.yaxis.set_ticklabels(labels, fontdict,
                                         minor=minor, **kwargs)
    set_yticklabels.__doc__ = cbook.dedent(
        set_yticklabels.__doc__) % martist.kwdocd

    def xaxis_date(self, tz=None):
        """Sets up x-axis ticks and labels that treat the x data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc value.
        """

        xmin, xmax = self.dataLim.intervalx
        if xmin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for the datalim
            # have been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(xdata=(dmin, dmax))
            dmin, dmax = self.convert_xunits([dmin, dmax])
            self.viewLim.intervalx = dmin, dmax
            self.dataLim.intervalx = dmin, dmax

        locator = self.xaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.xaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervalx[0]==0.:
            self.viewLim.intervalx = tuple(self.dataLim.intervalx)
        locator.refresh()

        formatter = self.xaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.xaxis.set_major_formatter(formatter)

    def yaxis_date(self, tz=None):
        """Sets up y-axis ticks and labels that treat the y data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc value.
        """

        ymin, ymax = self.dataLim.intervaly
        if ymin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for the datalim
            # have been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(ydata=(dmin, dmax))
            dmin, dmax = self.convert_yunits([dmin, dmax])
            self.viewLim.intervaly = dmin, dmax
            self.dataLim.intervaly = dmin, dmax

        locator = self.yaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.yaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.
        # If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervaly[0]==0.:
            self.viewLim.intervaly = tuple(self.dataLim.intervaly)
        locator.refresh()

        formatter = self.yaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.yaxis.set_major_formatter(formatter)

    def format_xdata(self, x):
        """
        Return *x* string formatted.  This function will use the attribute
        self.fmt_xdata if it is callable, else will fall back on the xaxis
        major formatter
        """
        try: return self.fmt_xdata(x)
        except TypeError:
            func = self.xaxis.get_major_formatter().format_data_short
            val = func(x)
            return val

    def format_ydata(self, y):
        """
        Return y string formatted.  This function will use the
        :attr:`fmt_ydata` attribute if it is callable, else will fall
        back on the yaxis major formatter
        """
        try: return self.fmt_ydata(y)
        except TypeError:
            func = self.yaxis.get_major_formatter().format_data_short
            val = func(y)
            return val

    def format_coord(self, x, y):
        'return a format string formatting the *x*, *y* coord'
        if x is None:
            x = '???'
        if y is None:
            y = '???'
        xs = self.format_xdata(x)
        ys = self.format_ydata(y)
        return 'x=%s, y=%s'%(xs,ys)

    #### Interactive manipulation

    def can_zoom(self):
        """
        Return *True* if this axes support the zoom box
        """
        return True

    def get_navigate(self):
        """
        Get whether the axes responds to navigation commands
        """
        return self._navigate

    def set_navigate(self, b):
        """
        Set whether the axes responds to navigation toolbar commands

        ACCEPTS: [ True | False ]
        """
        self._navigate = b

    def get_navigate_mode(self):
        """
        Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
        """
        return self._navigate_mode

    def set_navigate_mode(self, b):
        """
        Set the navigation toolbar button status;

        .. warning::
            this is not a user-API function.

        """
        self._navigate_mode = b

    def start_pan(self, x, y, button):
        """
        Called when a pan operation has started.

        *x*, *y* are the mouse coordinates in display coords.
        button is the mouse button number:

        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT

        .. note::
            Intended to be overridden by new projection types.
        """
        self._pan_start = cbook.Bunch(
            lim           = self.viewLim.frozen(),
            trans         = self.transData.frozen(),
            trans_inverse = self.transData.inverted().frozen(),
            bbox          = self.bbox.frozen(),
            x             = x,
            y             = y
            )

    def end_pan(self):
        """
        Called when a pan operation completes (when the mouse button
        is up.)

        .. note::
            Intended to be overridden by new projection types.
        """
        del self._pan_start

    def drag_pan(self, button, key, x, y):
        """
        Called when the mouse moves during a pan operation.

        *button* is the mouse button number:

        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT

        *key* is a "shift" key

        *x*, *y* are the mouse coordinates in display coords.

        .. note::
            Intended to be overridden by new projection types.
""" def format_deltas(key, dx, dy): if key=='control': if(abs(dx)>abs(dy)): dy = dx else: dx = dy elif key=='x': dy = 0 elif key=='y': dx = 0 elif key=='shift': if 2*abs(dx) < abs(dy): dx=0 elif 2*abs(dy) < abs(dx): dy=0 elif(abs(dx)>abs(dy)): dy=dy/abs(dy)*abs(dx) else: dx=dx/abs(dx)*abs(dy) return (dx,dy) p = self._pan_start dx = x - p.x dy = y - p.y if dx == 0 and dy == 0: return if button == 1: dx, dy = format_deltas(key, dx, dy) result = p.bbox.translated(-dx, -dy) \ .transformed(p.trans_inverse) elif button == 3: try: dx = -dx / float(self.bbox.width) dy = -dy / float(self.bbox.height) dx, dy = format_deltas(key, dx, dy) if self.get_aspect() != 'auto': dx = 0.5 * (dx + dy) dy = dx alpha = np.power(10.0, (dx, dy)) start = p.trans_inverse.transform_point((p.x, p.y)) lim_points = p.lim.get_points() result = start + alpha * (lim_points - start) result = mtransforms.Bbox(result) except OverflowError: warnings.warn('Overflow while panning') return self.set_xlim(*result.intervalx) self.set_ylim(*result.intervaly) def get_cursor_props(self): """ return the cursor propertiess as a (*linewidth*, *color*) tuple, where *linewidth* is a float and *color* is an RGBA tuple """ return self._cursorProps def set_cursor_props(self, *args): """ Set the cursor property as:: ax.set_cursor_props(linewidth, color) or:: ax.set_cursor_props((linewidth, color)) ACCEPTS: a (*float*, *color*) tuple """ if len(args)==1: lw, c = args[0] elif len(args)==2: lw, c = args else: raise ValueError('args must be a (linewidth, color) tuple') c =mcolors.colorConverter.to_rgba(c) self._cursorProps = lw, c def connect(self, s, func): """ Register observers to be notified when certain events occur. Register with callback functions with the following signatures. The function has the following signature:: func(ax) # where ax is the instance making the callback. The following events can be connected to: 'xlim_changed','ylim_changed' The connection id is is returned - you can use this with disconnect to disconnect from the axes event """ raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def disconnect(self, cid): 'disconnect from the Axes event.' raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def get_children(self): 'return a list of child artists' children = [] children.append(self.xaxis) children.append(self.yaxis) children.extend(self.lines) children.extend(self.patches) children.extend(self.texts) children.extend(self.tables) children.extend(self.artists) children.extend(self.images) if self.legend_ is not None: children.append(self.legend_) children.extend(self.collections) children.append(self.title) children.append(self.patch) children.append(self.frame) return children def contains(self,mouseevent): """Test whether the mouse event occured in the axes. Returns T/F, {} """ if callable(self._contains): return self._contains(self,mouseevent) return self.patch.contains(mouseevent) def pick(self, *args): """ call signature:: pick(mouseevent) each child artist will fire a pick event if mouseevent is over the artist and the artist has picker set """ if len(args)>1: raise DeprecationWarning('New pick API implemented -- ' 'see API_CHANGES in the src distribution') martist.Artist.pick(self,args[0]) def __pick(self, x, y, trans=None, among=None): """ Return the artist under point that is closest to the *x*, *y*. If *trans* is *None*, *x*, and *y* are in window coords, (0,0 = lower left). 
Otherwise, *trans* is a :class:`~matplotlib.transforms.Transform` that specifies the coordinate system of *x*, *y*. The selection of artists from amongst which the pick function finds an artist can be narrowed using the optional keyword argument *among*. If provided, this should be either a sequence of permitted artists or a function taking an artist as its argument and returning a true value if and only if that artist can be selected. Note this algorithm calculates distance to the vertices of the polygon, so if you want to pick a patch, click on the edge! """ # MGDTODO: Needs updating if trans is not None: xywin = trans.transform_point((x,y)) else: xywin = x,y def dist_points(p1, p2): 'return the distance between two points' x1, y1 = p1 x2, y2 = p2 return math.sqrt((x1-x2)**2+(y1-y2)**2) def dist_x_y(p1, x, y): '*x* and *y* are arrays; return the distance to the closest point' x1, y1 = p1 return min(np.sqrt((x-x1)**2+(y-y1)**2)) def dist(a): if isinstance(a, Text): bbox = a.get_window_extent() l,b,w,h = bbox.bounds verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b) xt, yt = zip(*verts) elif isinstance(a, Patch): path = a.get_path() tverts = a.get_transform().transform_path(path) xt, yt = zip(*tverts) elif isinstance(a, mlines.Line2D): xdata = a.get_xdata(orig=False) ydata = a.get_ydata(orig=False) xt, yt = a.get_transform().numerix_x_y(xdata, ydata) return dist_x_y(xywin, np.asarray(xt), np.asarray(yt)) artists = self.lines + self.patches + self.texts if callable(among): artists = filter(test, artists) elif iterable(among): amongd = dict([(k,1) for k in among]) artists = [a for a in artists if a in amongd] elif among is None: pass else: raise ValueError('among must be callable or iterable') if not len(artists): return None ds = [ (dist(a),a) for a in artists] ds.sort() return ds[0][1] #### Labelling def get_title(self): """ Get the title text string. """ return self.title.get_text() def set_title(self, label, fontdict=None, **kwargs): """ call signature:: set_title(label, fontdict=None, **kwargs): Set the title for the axes. kwargs are Text properties: %(Text)s ACCEPTS: str .. seealso:: :meth:`text`: for information on how override and the optional args work """ default = { 'fontsize':rcParams['axes.titlesize'], 'verticalalignment' : 'bottom', 'horizontalalignment' : 'center' } self.title.set_text(label) self.title.update(default) if fontdict is not None: self.title.update(fontdict) self.title.update(kwargs) return self.title set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd def get_xlabel(self): """ Get the xlabel text string. """ label = self.xaxis.get_label() return label.get_text() def set_xlabel(self, xlabel, fontdict=None, **kwargs): """ call signature:: set_xlabel(xlabel, fontdict=None, **kwargs) Set the label for the xaxis. Valid kwargs are Text properties: %(Text)s ACCEPTS: str .. seealso:: :meth:`text`: for information on how override and the optional args work """ label = self.xaxis.get_label() label.set_text(xlabel) if fontdict is not None: label.update(fontdict) label.update(kwargs) return label set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd def get_ylabel(self): """ Get the ylabel text string. """ label = self.yaxis.get_label() return label.get_text() def set_ylabel(self, ylabel, fontdict=None, **kwargs): """ call signature:: set_ylabel(ylabel, fontdict=None, **kwargs) Set the label for the yaxis Valid kwargs are Text properties: %(Text)s ACCEPTS: str .. 
seealso:: :meth:`text`: for information on how override and the optional args work """ label = self.yaxis.get_label() label.set_text(ylabel) if fontdict is not None: label.update(fontdict) label.update(kwargs) return label set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd def text(self, x, y, s, fontdict=None, withdash=False, **kwargs): """ call signature:: text(x, y, s, fontdict=None, **kwargs) Add text in string *s* to axis at location *x*, *y*, data coordinates. Keyword arguments: *fontdict*: A dictionary to override the default text properties. If *fontdict* is *None*, the defaults are determined by your rc parameters. *withdash*: [ False | True ] Creates a :class:`~matplotlib.text.TextWithDash` instance instead of a :class:`~matplotlib.text.Text` instance. Individual keyword arguments can be used to override any given parameter:: text(x, y, s, fontsize=12) The default transform specifies that text is in data coords, alternatively, you can specify text in axis coords (0,0 is lower-left and 1,1 is upper-right). The example below places text in the center of the axes:: text(0.5, 0.5,'matplotlib', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) You can put a rectangular box around the text instance (eg. to set a background color) by using the keyword *bbox*. *bbox* is a dictionary of :class:`matplotlib.patches.Rectangle` properties. For example:: text(x, y, s, bbox=dict(facecolor='red', alpha=0.5)) Valid kwargs are :class:`matplotlib.text.Text` properties: %(Text)s """ default = { 'verticalalignment' : 'bottom', 'horizontalalignment' : 'left', #'verticalalignment' : 'top', 'transform' : self.transData, } # At some point if we feel confident that TextWithDash # is robust as a drop-in replacement for Text and that # the performance impact of the heavier-weight class # isn't too significant, it may make sense to eliminate # the withdash kwarg and simply delegate whether there's # a dash to TextWithDash and dashlength. if withdash: t = mtext.TextWithDash( x=x, y=y, text=s, ) else: t = mtext.Text( x=x, y=y, text=s, ) self._set_artist_props(t) t.update(default) if fontdict is not None: t.update(fontdict) t.update(kwargs) self.texts.append(t) t._remove_method = lambda h: self.texts.remove(h) #if t.get_clip_on(): t.set_clip_box(self.bbox) if 'clip_on' in kwargs: t.set_clip_box(self.bbox) return t text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd def annotate(self, *args, **kwargs): """ call signature:: annotate(s, xy, xytext=None, xycoords='data', textcoords='data', arrowprops=None, **kwargs) Keyword arguments: %(Annotation)s .. plot:: mpl_examples/pylab_examples/annotation_demo2.py """ a = mtext.Annotation(*args, **kwargs) a.set_transform(mtransforms.IdentityTransform()) self._set_artist_props(a) if kwargs.has_key('clip_on'): a.set_clip_path(self.patch) self.texts.append(a) return a annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd #### Lines and spans def axhline(self, y=0, xmin=0, xmax=1, **kwargs): """ call signature:: axhline(y=0, xmin=0, xmax=1, **kwargs) Axis Horizontal Line Draw a horizontal line at *y* from *xmin* to *xmax*. With the default values of *xmin* = 0 and *xmax* = 1, this line will always span the horizontal extent of the axes, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the horizontal extent is in axes coords: 0=left, 0.5=middle, 1.0=right but the *y* location is in data coordinates. 
Return value is the :class:`~matplotlib.lines.Line2D` instance. kwargs are the same as kwargs to plot, and can be used to control the line properties. Eg., * draw a thick red hline at *y* = 0 that spans the xrange >>> axhline(linewidth=4, color='r') * draw a default hline at *y* = 1 that spans the xrange >>> axhline(y=1) * draw a default hline at *y* = .5 that spans the the middle half of the xrange >>> axhline(y=.5, xmin=0.25, xmax=0.75) Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`axhspan`: for example plot and source code """ ymin, ymax = self.get_ybound() # We need to strip away the units for comparison with # non-unitized bounds yy = self.convert_yunits( y ) scaley = (yy<ymin) or (yy>ymax) trans = mtransforms.blended_transform_factory( self.transAxes, self.transData) l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs) l.x_isdata = False self.add_line(l) self.autoscale_view(scalex=False, scaley=scaley) return l axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd def axvline(self, x=0, ymin=0, ymax=1, **kwargs): """ call signature:: axvline(x=0, ymin=0, ymax=1, **kwargs) Axis Vertical Line Draw a vertical line at *x* from *ymin* to *ymax*. With the default values of *ymin* = 0 and *ymax* = 1, this line will always span the vertical extent of the axes, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the vertical extent is in axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location is in data coordinates. Return value is the :class:`~matplotlib.lines.Line2D` instance. kwargs are the same as kwargs to plot, and can be used to control the line properties. Eg., * draw a thick red vline at *x* = 0 that spans the yrange >>> axvline(linewidth=4, color='r') * draw a default vline at *x* = 1 that spans the yrange >>> axvline(x=1) * draw a default vline at *x* = .5 that spans the the middle half of the yrange >>> axvline(x=.5, ymin=0.25, ymax=0.75) Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`axhspan`: for example plot and source code """ xmin, xmax = self.get_xbound() # We need to strip away the units for comparison with # non-unitized bounds xx = self.convert_xunits( x ) scalex = (xx<xmin) or (xx>xmax) trans = mtransforms.blended_transform_factory( self.transData, self.transAxes) l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs) l.y_isdata = False self.add_line(l) self.autoscale_view(scalex=scalex, scaley=False) return l axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs): """ call signature:: axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs) Axis Horizontal Span. *y* coords are in data units and *x* coords are in axes (relative 0-1) units. Draw a horizontal span (rectangle) from *ymin* to *ymax*. With the default values of *xmin* = 0 and *xmax* = 1, this always spans the xrange, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the horizontal extent is in axes coords: 0=left, 0.5=middle, 1.0=right but the *y* location is in data coordinates. Return value is a :class:`matplotlib.patches.Polygon` instance. Examples: * draw a gray rectangle from *y* = 0.25-0.75 that spans the horizontal extent of the axes >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5) Valid kwargs are :class:`~matplotlib.patches.Polygon` properties: %(Polygon)s **Example:** .. 
plot:: mpl_examples/pylab_examples/axhspan_demo.py """ trans = mtransforms.blended_transform_factory( self.transAxes, self.transData) # process the unit information self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs ) # first we need to strip away the units xmin, xmax = self.convert_xunits( [xmin, xmax] ) ymin, ymax = self.convert_yunits( [ymin, ymax] ) verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin) p = mpatches.Polygon(verts, **kwargs) p.set_transform(trans) p.x_isdata = False self.add_patch(p) return p axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs): """ call signature:: axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs) Axis Vertical Span. *x* coords are in data units and *y* coords are in axes (relative 0-1) units. Draw a vertical span (rectangle) from *xmin* to *xmax*. With the default values of *ymin* = 0 and *ymax* = 1, this always spans the yrange, regardless of the ylim settings, even if you change them, eg. with the :meth:`set_ylim` command. That is, the vertical extent is in axes coords: 0=bottom, 0.5=middle, 1.0=top but the *y* location is in data coordinates. Return value is the :class:`matplotlib.patches.Polygon` instance. Examples: * draw a vertical green translucent rectangle from x=1.25 to 1.55 that spans the yrange of the axes >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5) Valid kwargs are :class:`~matplotlib.patches.Polygon` properties: %(Polygon)s .. seealso:: :meth:`axhspan`: for example plot and source code """ trans = mtransforms.blended_transform_factory( self.transData, self.transAxes) # process the unit information self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs ) # first we need to strip away the units xmin, xmax = self.convert_xunits( [xmin, xmax] ) ymin, ymax = self.convert_yunits( [ymin, ymax] ) verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)] p = mpatches.Polygon(verts, **kwargs) p.set_transform(trans) p.y_isdata = False self.add_patch(p) return p axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd def hlines(self, y, xmin, xmax, colors='k', linestyles='solid', label='', **kwargs): """ call signature:: hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs) Plot horizontal lines at each *y* from *xmin* to *xmax*. Returns the :class:`~matplotlib.collections.LineCollection` that was added. Required arguments: *y*: a 1-D numpy array or iterable. *xmin* and *xmax*: can be scalars or ``len(x)`` numpy arrays. If they are scalars, then the respective values are constant, else the widths of the lines are determined by *xmin* and *xmax*. Optional keyword arguments: *colors*: a line collections color argument, either a single color or a ``len(y)`` list of colors *linestyles*: [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] **Example:** .. 
plot:: mpl_examples/pylab_examples/hline_demo.py """ if kwargs.get('fmt') is not None: raise DeprecationWarning('hlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') # We do the conversion first since not all unitized data is uniform y = self.convert_yunits( y ) xmin = self.convert_xunits( xmin ) xmax = self.convert_xunits( xmax ) if not iterable(y): y = [y] if not iterable(xmin): xmin = [xmin] if not iterable(xmax): xmax = [xmax] y = np.asarray(y) xmin = np.asarray(xmin) xmax = np.asarray(xmax) if len(xmin)==1: xmin = np.resize( xmin, y.shape ) if len(xmax)==1: xmax = np.resize( xmax, y.shape ) if len(xmin)!=len(y): raise ValueError, 'xmin and y are unequal sized sequences' if len(xmax)!=len(y): raise ValueError, 'xmax and y are unequal sized sequences' verts = [ ((thisxmin, thisy), (thisxmax, thisy)) for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)] coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min(xmin.min(), xmax.min()) maxx = max(xmin.max(), xmax.max()) miny = y.min() maxy = y.max() corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll hlines.__doc__ = cbook.dedent(hlines.__doc__) def vlines(self, x, ymin, ymax, colors='k', linestyles='solid', label='', **kwargs): """ call signature:: vlines(x, ymin, ymax, color='k', linestyles='solid') Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin* or *ymax* can be scalars or len(*x*) numpy arrays. If they are scalars, then the respective values are constant, else the heights of the lines are determined by *ymin* and *ymax*. *colors* a line collections color args, either a single color or a len(*x*) list of colors *linestyles* one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] Returns the :class:`matplotlib.collections.LineCollection` that was added. kwargs are :class:`~matplotlib.collections.LineCollection` properties: %(LineCollection)s """ if kwargs.get('fmt') is not None: raise DeprecationWarning('vlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs) # We do the conversion first since not all unitized data is uniform x = self.convert_xunits( x ) ymin = self.convert_yunits( ymin ) ymax = self.convert_yunits( ymax ) if not iterable(x): x = [x] if not iterable(ymin): ymin = [ymin] if not iterable(ymax): ymax = [ymax] x = np.asarray(x) ymin = np.asarray(ymin) ymax = np.asarray(ymax) if len(ymin)==1: ymin = np.resize( ymin, x.shape ) if len(ymax)==1: ymax = np.resize( ymax, x.shape ) if len(ymin)!=len(x): raise ValueError, 'ymin and x are unequal sized sequences' if len(ymax)!=len(x): raise ValueError, 'ymax and x are unequal sized sequences' Y = np.array([ymin, ymax]).T verts = [ ((thisx, thisymin), (thisx, thisymax)) for thisx, (thisymin, thisymax) in zip(x,Y)] #print 'creating line collection' coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min( x ) maxx = max( x ) miny = min( min(ymin), min(ymax) ) maxy = max( max(ymin), max(ymax) ) corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd #### Basic plotting def plot(self, *args, **kwargs): """ Plot lines and/or markers to the :class:`~matplotlib.axes.Axes`. 
*args* is a variable length argument, allowing for multiple *x*, *y* pairs with an optional format string. For example, each of the following is legal:: plot(x, y) # plot x and y using default line style and color plot(x, y, 'bo') # plot x and y using blue circle markers plot(y) # plot y using x as index array 0..N-1 plot(y, 'r+') # ditto, but with red plusses If *x* and/or *y* is 2-dimensional, then the corresponding columns will be plotted. An arbitrary number of *x*, *y*, *fmt* groups can be specified, as in:: a.plot(x1, y1, 'g^', x2, y2, 'g-') Return value is a list of lines that were added. The following format string characters are accepted to control the line style or marker: ================ =============================== character description ================ =============================== '-' solid line style '--' dashed line style '-.' dash-dot line style ':' dotted line style '.' point marker ',' pixel marker 'o' circle marker 'v' triangle_down marker '^' triangle_up marker '<' triangle_left marker '>' triangle_right marker '1' tri_down marker '2' tri_up marker '3' tri_left marker '4' tri_right marker 's' square marker 'p' pentagon marker '*' star marker 'h' hexagon1 marker 'H' hexagon2 marker '+' plus marker 'x' x marker 'D' diamond marker 'd' thin_diamond marker '|' vline marker '_' hline marker ================ =============================== The following color abbreviations are supported: ========== ======== character color ========== ======== 'b' blue 'g' green 'r' red 'c' cyan 'm' magenta 'y' yellow 'k' black 'w' white ========== ======== In addition, you can specify colors in many weird and wonderful ways, including full names (``'green'``), hex strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or grayscale intensities as a string (``'0.8'``). Of these, the string specifications can be used in place of a ``fmt`` group, but the tuple forms can be used only as ``kwargs``. Line styles and colors are combined in a single format string, as in ``'bo'`` for blue circles. The *kwargs* can be used to set line properties (any property that has a ``set_*`` method). You can use this to set a line label (for auto legends), linewidth, anitialising, marker face color, etc. Here is an example:: plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2) plot([1,2,3], [1,4,9], 'rs', label='line 2') axis([0, 4, 0, 10]) legend() If you make multiple lines with one plot command, the kwargs apply to all those lines, e.g.:: plot(x1, y1, x2, y2, antialised=False) Neither line will be antialiased. You do not need to use format strings, which are just abbreviations. All of the line properties can be controlled by keyword arguments. For example, you can set the color, marker, linestyle, and markercolor with:: plot(x, y, color='green', linestyle='dashed', marker='o', markerfacecolor='blue', markersize=12). See :class:`~matplotlib.lines.Line2D` for details. The kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s kwargs *scalex* and *scaley*, if defined, are passed on to :meth:`~matplotlib.axes.Axes.autoscale_view` to determine whether the *x* and *y* axes are autoscaled; the default is *True*. 
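
        For example, assuming an Axes instance ``ax``, a new line can be
        added without rescaling the current y view limits:

        >>> lines = ax.plot([0, 1, 2], [10, 20, 15], 'g^-', label='line 1',
        ...                 scaley=False)
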
""" scalex = kwargs.pop( 'scalex', True) scaley = kwargs.pop( 'scaley', True) if not self._hold: self.cla() lines = [] for line in self._get_lines(*args, **kwargs): self.add_line(line) lines.append(line) self.autoscale_view(scalex=scalex, scaley=scaley) return lines plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs): """ call signature:: plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs) Similar to the :func:`~matplotlib.pyplot.plot` command, except the *x* or *y* (or both) data is considered to be dates, and the axis is labeled accordingly. *x* and/or *y* can be a sequence of dates represented as float days since 0001-01-01 UTC. Keyword arguments: *fmt*: string The plot format string. *tz*: [ None | timezone string ] The time zone to use in labeling dates. If *None*, defaults to rc value. *xdate*: [ True | False ] If *True*, the *x*-axis will be labeled with dates. *ydate*: [ False | True ] If *True*, the *y*-axis will be labeled with dates. Note if you are using custom date tickers and formatters, it may be necessary to set the formatters/locators after the call to :meth:`plot_date` since :meth:`plot_date` will set the default tick locator to :class:`matplotlib.ticker.AutoDateLocator` (if the tick locator is not already set to a :class:`matplotlib.ticker.DateLocator` instance) and the default tick formatter to :class:`matplotlib.ticker.AutoDateFormatter` (if the tick formatter is not already set to a :class:`matplotlib.ticker.DateFormatter` instance). Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :mod:`~matplotlib.dates`: for helper functions :func:`~matplotlib.dates.date2num`, :func:`~matplotlib.dates.num2date` and :func:`~matplotlib.dates.drange`: for help on creating the required floating point dates. """ if not self._hold: self.cla() ret = self.plot(x, y, fmt, **kwargs) if xdate: self.xaxis_date(tz) if ydate: self.yaxis_date(tz) self.autoscale_view() return ret plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd def loglog(self, *args, **kwargs): """ call signature:: loglog(*args, **kwargs) Make a plot with log scaling on the *x* and *y* axis. :func:`~matplotlib.pyplot.loglog` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basex*/*basey*: scalar > 1 base of the *x*/*y* logarithm *subsx*/*subsy*: [ None | sequence ] the location of the minor *x*/*y* ticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale` for details The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/log_demo.py """ if not self._hold: self.cla() dx = {'basex': kwargs.pop('basex', 10), 'subsx': kwargs.pop('subsx', None), } dy = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_xscale('log', **dx) self.set_yscale('log', **dy) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd def semilogx(self, *args, **kwargs): """ call signature:: semilogx(*args, **kwargs) Make a plot with log scaling on the *x* axis. 
:func:`semilogx` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale`. Notable keyword arguments: *basex*: scalar > 1 base of the *x* logarithm *subsx*: [ None | sequence ] The location of the minor xticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basex': kwargs.pop( 'basex', 10), 'subsx': kwargs.pop( 'subsx', None), } self.set_xscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd def semilogy(self, *args, **kwargs): """ call signature:: semilogy(*args, **kwargs) Make a plot with log scaling on the *y* axis. :func:`semilogy` supports all the keyword arguments of :func:`~matplotlib.pylab.plot` and :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basey*: scalar > 1 Base of the *y* logarithm *subsy*: [ None | sequence ] The location of the minor yticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_yscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd def acorr(self, x, **kwargs): """ call signature:: acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs) Plot the autocorrelation of *x*. If *normed* = *True*, normalize the data by the autocorrelation at 0-th lag. *x* is detrended by the *detrend* callable (default no normalization). Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length 2*maxlags+1 lag vector - *c* is the 2*maxlags+1 auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :meth:`plot` The default *linestyle* is None and the default *marker* is ``'o'``, though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines` rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw vertical lines from the origin to the acorr. Otherwise, the plot style is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all :math:`2 \mathrm{len}(x) - 1` lags. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where - *linecol* is the :class:`~matplotlib.collections.LineCollection` - *b* is the *x*-axis. .. seealso:: :meth:`~matplotlib.axes.Axes.plot` or :meth:`~matplotlib.axes.Axes.vlines`: For documentation on valid kwargs. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py """ return self.xcorr(x, x, **kwargs) acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs): """ call signature:: xcorr(x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, **kwargs): Plot the cross correlation between *x* and *y*. If *normed* = *True*, normalize the data by the cross correlation at 0-th lag. *x* and y are detrended by the *detrend* callable (default no normalization). *x* and *y* must be equal length. Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length ``2*maxlags+1`` lag vector - *c* is the ``2*maxlags+1`` auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :func:`~matplotlib.pyplot.plot`. The default *linestyle* is *None* and the default *marker* is 'o', though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*: :func:`~matplotlib.pyplot.vlines` rather than :func:`~matplotlib.pyplot.plot` is used to draw vertical lines from the origin to the xcorr. Otherwise the plotstyle is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where *linecol* is the :class:`matplotlib.collections.LineCollection` instance and *b* is the *x*-axis. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all ``(2*len(x)-1)`` lags. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. plot:: mpl_examples/pylab_examples/xcorr_demo.py """ Nx = len(x) if Nx!=len(y): raise ValueError('x and y must be equal length') x = detrend(np.asarray(x)) y = detrend(np.asarray(y)) c = np.correlate(x, y, mode=2) if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y)) if maxlags is None: maxlags = Nx - 1 if maxlags >= Nx or maxlags < 1: raise ValueError('maglags must be None or strictly ' 'positive < %d'%Nx) lags = np.arange(-maxlags,maxlags+1) c = c[Nx-1-maxlags:Nx+maxlags] if usevlines: a = self.vlines(lags, [0], c, **kwargs) b = self.axhline(**kwargs) else: kwargs.setdefault('marker', 'o') kwargs.setdefault('linestyle', 'None') a, = self.plot(lags, c, **kwargs) b = None return lags, c, a, b xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd def legend(self, *args, **kwargs): """ call signature:: legend(*args, **kwargs) Place a legend on the current axes at location *loc*. Labels are a sequence of strings and *loc* can be a string or an integer specifying the legend location. To make a legend with existing lines:: legend() :meth:`legend` by itself will try and build a legend using the label property of the lines/patches/collections. You can set the label of a line by doing:: plot(x, y, label='my data') or:: line.set_label('my data'). If label is set to '_nolegend_', the item will not be shown in legend. 
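        A short sketch of this label-based workflow (illustrative only; the
        data and label strings are arbitrary)::

            import numpy as np
            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            x = np.arange(10)
            ax.plot(x, x,      label='linear')
            ax.plot(x, x ** 2, label='quadratic')
            ax.plot(x, x ** 3, label='_nolegend_')   # excluded from the legend
            ax.legend(loc='upper left')
            plt.show()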
To automatically generate the legend from labels:: legend( ('label1', 'label2', 'label3') ) To make a legend for a list of lines and labels:: legend( (line1, line2, line3), ('label1', 'label2', 'label3') ) To make a legend at a given location, using a location argument:: legend( ('label1', 'label2', 'label3'), loc='upper left') or:: legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2) The location codes are =============== ============= Location String Location Code =============== ============= 'best' 0 'upper right' 1 'upper left' 2 'lower left' 3 'lower right' 4 'right' 5 'center left' 6 'center right' 7 'lower center' 8 'upper center' 9 'center' 10 =============== ============= If none of these are locations are suitable, loc can be a 2-tuple giving x,y in axes coords, ie:: loc = 0, 1 # left top loc = 0.5, 0.5 # center Keyword arguments: *isaxes*: [ True | False ] Indicates that this is an axes legend *numpoints*: integer The number of points in the legend line, default is 4 *prop*: [ None | FontProperties ] A :class:`matplotlib.font_manager.FontProperties` instance, or *None* to use rc settings. *pad*: [ None | scalar ] The fractional whitespace inside the legend border, between 0 and 1. If *None*, use rc settings. *markerscale*: [ None | scalar ] The relative size of legend markers vs. original. If *None*, use rc settings. *shadow*: [ None | False | True ] If *True*, draw a shadow behind legend. If *None*, use rc settings. *labelsep*: [ None | scalar ] The vertical space between the legend entries. If *None*, use rc settings. *handlelen*: [ None | scalar ] The length of the legend lines. If *None*, use rc settings. *handletextsep*: [ None | scalar ] The space between the legend line and legend text. If *None*, use rc settings. *axespad*: [ None | scalar ] The border between the axes and legend edge. If *None*, use rc settings. **Example:** .. plot:: mpl_examples/api/legend_demo.py """ def get_handles(): handles = self.lines[:] handles.extend(self.patches) handles.extend([c for c in self.collections if isinstance(c, mcoll.LineCollection)]) handles.extend([c for c in self.collections if isinstance(c, mcoll.RegularPolyCollection)]) return handles if len(args)==0: handles = [] labels = [] for handle in get_handles(): label = handle.get_label() if (label is not None and label != '' and not label.startswith('_')): handles.append(handle) labels.append(label) if len(handles) == 0: warnings.warn("No labeled objects found. " "Use label='...' kwarg on individual plots.") return None elif len(args)==1: # LABELS labels = args[0] handles = [h for h, label in zip(get_handles(), labels)] elif len(args)==2: if is_string_like(args[1]) or isinstance(args[1], int): # LABELS, LOC labels, loc = args handles = [h for h, label in zip(get_handles(), labels)] kwargs['loc'] = loc else: # LINES, LABELS handles, labels = args elif len(args)==3: # LINES, LABELS, LOC handles, labels, loc = args kwargs['loc'] = loc else: raise TypeError('Invalid arguments to legend') handles = cbook.flatten(handles) self.legend_ = mlegend.Legend(self, handles, labels, **kwargs) return self.legend_ #### Specialized plotting def step(self, x, y, *args, **kwargs): ''' call signature:: step(x, y, *args, **kwargs) Make a step plot. Additional keyword args to :func:`step` are the same as those for :func:`~matplotlib.pyplot.plot`. *x* and *y* must be 1-D sequences, and it is assumed, but not checked, that *x* is uniformly increasing. 
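        For example, a minimal sketch (illustrative only; the data are
        arbitrary and the *where* keyword used here is documented below)::

            import numpy as np
            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            x = np.arange(8)
            y = np.array([0, 1, 1, 2, 3, 5, 8, 13])
            ax.step(x, y, where='post')   # each level is held until the next x value
            plt.show()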
Keyword arguments: *where*: [ 'pre' | 'post' | 'mid' ] If 'pre', the interval from x[i] to x[i+1] has level y[i] If 'post', that interval has level y[i+1] If 'mid', the jumps in *y* occur half-way between the *x*-values. ''' where = kwargs.pop('where', 'pre') if where not in ('pre', 'post', 'mid'): raise ValueError("'where' argument to step must be " "'pre', 'post' or 'mid'") kwargs['linestyle'] = 'steps-' + where return self.plot(x, y, *args, **kwargs) def bar(self, left, height, width=0.8, bottom=None, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False, **kwargs ): """ call signature:: bar(left, height, width=0.8, bottom=0, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False) Make a bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *left*, *height*, *width*, and *bottom* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== =============================================== Argument Description ======== =============================================== *left* the x coordinates of the left sides of the bars *height* the heights of the bars ======== =============================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *width* the widths of the bars *bottom* the y coordinates of the bottom edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *orientation* 'vertical' | 'horizontal' *log* [False|True] False (default) leaves the orientation axis as-is; True sets it to log scale =============== ========================================== For vertical bars, *align* = 'edge' aligns bars by their left edges in left, while *align* = 'center' interprets these values as the *x* coordinates of the bar centers. For horizontal bars, *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use bar as the basis for stacked bar charts, or candlestick plots. Other optional kwargs: %(Rectangle)s **Example:** A stacked bar chart. .. 
plot:: mpl_examples/pylab_examples/bar_stacked.py """ if not self._hold: self.cla() label = kwargs.pop('label', '') def make_iterable(x): if not iterable(x): return [x] else: return x # make them safe to take len() of _left = left left = make_iterable(left) height = make_iterable(height) width = make_iterable(width) _bottom = bottom bottom = make_iterable(bottom) linewidth = make_iterable(linewidth) adjust_ylim = False adjust_xlim = False if orientation == 'vertical': self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs) if log: self.set_yscale('log') # size width and bottom according to length of left if _bottom is None: if self.get_yscale() == 'log': bottom = [1e-100] adjust_ylim = True else: bottom = [0] nbars = len(left) if len(width) == 1: width *= nbars if len(bottom) == 1: bottom *= nbars elif orientation == 'horizontal': self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs) if log: self.set_xscale('log') # size left and height according to length of bottom if _left is None: if self.get_xscale() == 'log': left = [1e-100] adjust_xlim = True else: left = [0] nbars = len(bottom) if len(left) == 1: left *= nbars if len(height) == 1: height *= nbars else: raise ValueError, 'invalid orientation: %s' % orientation # do not convert to array here as unit info is lost #left = np.asarray(left) #height = np.asarray(height) #width = np.asarray(width) #bottom = np.asarray(bottom) if len(linewidth) < nbars: linewidth *= nbars if color is None: color = [None] * nbars else: color = list(mcolors.colorConverter.to_rgba_array(color)) if len(color) < nbars: color *= nbars if edgecolor is None: edgecolor = [None] * nbars else: edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor)) if len(edgecolor) < nbars: edgecolor *= nbars if yerr is not None: if not iterable(yerr): yerr = [yerr]*nbars if xerr is not None: if not iterable(xerr): xerr = [xerr]*nbars # FIXME: convert the following to proper input validation # raising ValueError; don't use assert for this. assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars assert len(height)==nbars, ("argument 'height' must be %d or scalar" % nbars) assert len(width)==nbars, ("argument 'width' must be %d or scalar" % nbars) assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" % nbars) if yerr is not None and len(yerr)!=nbars: raise ValueError( "bar() argument 'yerr' must be len(%s) or scalar" % nbars) if xerr is not None and len(xerr)!=nbars: raise ValueError( "bar() argument 'xerr' must be len(%s) or scalar" % nbars) patches = [] # lets do some conversions now since some types cannot be # subtracted uniformly if self.xaxis is not None: xconv = self.xaxis.converter if xconv is not None: units = self.xaxis.get_units() left = xconv.convert( left, units ) width = xconv.convert( width, units ) if self.yaxis is not None: yconv = self.yaxis.converter if yconv is not None : units = self.yaxis.get_units() bottom = yconv.convert( bottom, units ) height = yconv.convert( height, units ) if align == 'edge': pass elif align == 'center': if orientation == 'vertical': left = [left[i] - width[i]/2. for i in xrange(len(left))] elif orientation == 'horizontal': bottom = [bottom[i] - height[i]/2. 
for i in xrange(len(bottom))] else: raise ValueError, 'invalid alignment: %s' % align args = zip(left, bottom, width, height, color, edgecolor, linewidth) for l, b, w, h, c, e, lw in args: if h<0: b += h h = abs(h) if w<0: l += w w = abs(w) r = mpatches.Rectangle( xy=(l, b), width=w, height=h, facecolor=c, edgecolor=e, linewidth=lw, label=label ) label = '_nolegend_' r.update(kwargs) #print r.get_label(), label, 'label' in kwargs self.add_patch(r) patches.append(r) holdstate = self._hold self.hold(True) # ensure hold is on before plotting errorbars if xerr is not None or yerr is not None: if orientation == 'vertical': # using list comps rather than arrays to preserve unit info x = [l+0.5*w for l, w in zip(left, width)] y = [b+h for b,h in zip(bottom, height)] elif orientation == 'horizontal': # using list comps rather than arrays to preserve unit info x = [l+w for l,w in zip(left, width)] y = [b+0.5*h for b,h in zip(bottom, height)] self.errorbar( x, y, yerr=yerr, xerr=xerr, fmt=None, ecolor=ecolor, capsize=capsize) self.hold(holdstate) # restore previous hold state if adjust_xlim: xmin, xmax = self.dataLim.intervalx xmin = np.amin(width[width!=0]) # filter out the 0 width rects if xerr is not None: xmin = xmin - np.amax(xerr) xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) if adjust_ylim: ymin, ymax = self.dataLim.intervaly ymin = np.amin(height[height!=0]) # filter out the 0 height rects if yerr is not None: ymin = ymin - np.amax(yerr) ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() return patches bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd def barh(self, bottom, width, height=0.8, left=None, **kwargs): """ call signature:: barh(bottom, width, height=0.8, left=0, **kwargs) Make a horizontal bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *bottom*, *width*, *height*, and *left* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== ====================================================== Argument Description ======== ====================================================== *bottom* the vertical positions of the bottom edges of the bars *width* the lengths of the bars ======== ====================================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *height* the heights (thicknesses) of the bars *left* the x coordinates of the left edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *log* [False|True] False (default) leaves the horizontal axis as-is; True sets it to log scale =============== ========================================== Setting *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. 
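        A minimal sketch of a centered horizontal bar chart with error bars
        (illustrative only; the data and tick labels are arbitrary)::

            import numpy as np
            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
            y = np.arange(4)
            widths = [3, 7, 5, 2]
            ax.barh(y, widths, height=0.6, align='center', color='c',
                    xerr=[0.5, 1.0, 0.75, 0.25], ecolor='k')
            ax.set_yticks(y)
            ax.set_yticklabels(['a', 'b', 'c', 'd'])
            plt.show()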
The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use barh as the basis for stacked bar charts, or candlestick plots. other optional kwargs: %(Rectangle)s """ patches = self.bar(left=left, height=height, width=width, bottom=bottom, orientation='horizontal', **kwargs) return patches barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd def broken_barh(self, xranges, yrange, **kwargs): """ call signature:: broken_barh(self, xranges, yrange, **kwargs) A collection of horizontal bars spanning *yrange* with a sequence of *xranges*. Required arguments: ========= ============================== Argument Description ========= ============================== *xranges* sequence of (*xmin*, *xwidth*) *yrange* sequence of (*ymin*, *ywidth*) ========= ============================== kwargs are :class:`matplotlib.collections.BrokenBarHCollection` properties: %(BrokenBarHCollection)s these can either be a single argument, ie:: facecolors = 'black' or a sequence of arguments for the various bars, ie:: facecolors = ('black', 'red', 'green') **Example:** .. plot:: mpl_examples/pylab_examples/broken_barh.py """ col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs) self.add_collection(col, autolim=True) self.autoscale_view() return col broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'): """ call signature:: stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-') A stem plot plots vertical lines (using *linefmt*) at each *x* location from the baseline to *y*, and places a marker there using *markerfmt*. A horizontal line at 0 is is plotted using *basefmt*. Return value is a tuple (*markerline*, *stemlines*, *baseline*). .. seealso:: `this document`__ for details :file:`examples/pylab_examples/stem_plot.py`: for a demo __ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html """ remember_hold=self._hold if not self._hold: self.cla() self.hold(True) markerline, = self.plot(x, y, markerfmt) stemlines = [] for thisx, thisy in zip(x, y): l, = self.plot([thisx,thisx], [0, thisy], linefmt) stemlines.append(l) baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt) self.hold(remember_hold) return markerline, stemlines, baseline def pie(self, x, explode=None, labels=None, colors=None, autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1): r""" call signature:: pie(x, explode=None, labels=None, colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'), autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False) Make a pie chart of array *x*. The fractional area of each wedge is given by x/sum(x). If sum(x) <= 1, then the values of x give the fractional area directly and the array will not be normalized. Keyword arguments: *explode*: [ None | len(x) sequence ] If not *None*, is a len(*x*) array which specifies the fraction of the radius with which to offset each wedge. *colors*: [ None | color sequence ] A sequence of matplotlib color args through which the pie chart will cycle. *labels*: [ None | len(x) sequence of strings ] A sequence of strings providing the labels for each wedge *autopct*: [ None | format string | format function ] If not *None*, is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be ``fmt%pct``. If it is a function, it will be called. 
*pctdistance*: scalar The ratio between the center of each pie slice and the start of the text generated by *autopct*. Ignored if *autopct* is *None*; default is 0.6. *labeldistance*: scalar The radial distance at which the pie labels are drawn *shadow*: [ False | True ] Draw a shadow beneath the pie. The pie chart will probably look best if the figure and axes are square. Eg.:: figure(figsize=(8,8)) ax = axes([0.1, 0.1, 0.8, 0.8]) Return value: If *autopct* is None, return the tuple (*patches*, *texts*): - *patches* is a sequence of :class:`matplotlib.patches.Wedge` instances - *texts* is a list of the label :class:`matplotlib.text.Text` instances. If *autopct* is not *None*, return the tuple (*patches*, *texts*, *autotexts*), where *patches* and *texts* are as above, and *autotexts* is a list of :class:`~matplotlib.text.Text` instances for the numeric labels. """ self.set_frame_on(False) x = np.asarray(x).astype(np.float32) sx = float(x.sum()) if sx>1: x = np.divide(x,sx) if labels is None: labels = ['']*len(x) if explode is None: explode = [0]*len(x) assert(len(x)==len(labels)) assert(len(x)==len(explode)) if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w') center = 0,0 radius = 1 theta1 = 0 i = 0 texts = [] slices = [] autotexts = [] for frac, label, expl in cbook.safezip(x,labels, explode): x, y = center theta2 = theta1 + frac thetam = 2*math.pi*0.5*(theta1+theta2) x += expl*math.cos(thetam) y += expl*math.sin(thetam) w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2, facecolor=colors[i%len(colors)]) slices.append(w) self.add_patch(w) w.set_label(label) if shadow: # make sure to add a shadow after the call to # add_patch so the figure and transform props will be # set shad = mpatches.Shadow(w, -0.02, -0.02, #props={'facecolor':w.get_facecolor()} ) shad.set_zorder(0.9*w.get_zorder()) self.add_patch(shad) xt = x + labeldistance*radius*math.cos(thetam) yt = y + labeldistance*radius*math.sin(thetam) label_alignment = xt > 0 and 'left' or 'right' t = self.text(xt, yt, label, size=rcParams['xtick.labelsize'], horizontalalignment=label_alignment, verticalalignment='center') texts.append(t) if autopct is not None: xt = x + pctdistance*radius*math.cos(thetam) yt = y + pctdistance*radius*math.sin(thetam) if is_string_like(autopct): s = autopct%(100.*frac) elif callable(autopct): s = autopct(100.*frac) else: raise TypeError( 'autopct must be callable or a format string') t = self.text(xt, yt, s, horizontalalignment='center', verticalalignment='center') autotexts.append(t) theta1 = theta2 i += 1 self.set_xlim((-1.25, 1.25)) self.set_ylim((-1.25, 1.25)) self.set_xticks([]) self.set_yticks([]) if autopct is None: return slices, texts else: return slices, texts, autotexts def errorbar(self, x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False, **kwargs): """ call signature:: errorbar(x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False) Plot *x* versus *y* with error deltas in *yerr* and *xerr*. Vertical errorbars are plotted if *yerr* is not *None*. Horizontal errorbars are plotted if *xerr* is not *None*. *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a single error bar at *x*, *y*. Optional keyword arguments: *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ] If a scalar number, len(N) array-like object, or an Nx1 array-like object, errorbars are drawn +/- value. 
If a rank-1, Nx2 Numpy array, errorbars are drawn at -column1 and +column2 *fmt*: '-' The plot format symbol for *y*. If *fmt* is *None*, just plot the errorbars with no line symbols. This can be useful for creating a bar plot with errorbars. *ecolor*: [ None | mpl color ] a matplotlib color arg which gives the color the errorbar lines; if *None*, use the marker color. *elinewidth*: scalar the linewidth of the errorbar lines. If *None*, use the linewidth. *capsize*: scalar the size of the error bar caps in points *barsabove*: [ True | False ] if *True*, will plot the errorbars above the plot symbols. Default is below. *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ] These arguments can be used to indicate that a value gives only upper/lower limits. In that case a caret symbol is used to indicate this. lims-arguments may be of the same type as *xerr* and *yerr*. All other keyword arguments are passed on to the plot command for the markers, so you can add additional key=value pairs to control the errorbar markers. For example, this code makes big red squares with thick green edges:: x,y,yerr = rand(3,10) errorbar(x, y, yerr, marker='s', mfc='red', mec='green', ms=20, mew=4) where *mfc*, *mec*, *ms* and *mew* are aliases for the longer property names, *markerfacecolor*, *markeredgecolor*, *markersize* and *markeredgewith*. valid kwargs for the marker properties are %(Line2D)s Return value is a length 3 tuple. The first element is the :class:`~matplotlib.lines.Line2D` instance for the *y* symbol lines. The second element is a list of error bar cap lines, the third element is a list of :class:`~matplotlib.collections.LineCollection` instances for the horizontal and vertical error ranges. **Example:** .. plot:: mpl_examples/pylab_examples/errorbar_demo.py """ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) if not self._hold: self.cla() # make sure all the args are iterable; use lists not arrays to # preserve units if not iterable(x): x = [x] if not iterable(y): y = [y] if xerr is not None: if not iterable(xerr): xerr = [xerr]*len(x) if yerr is not None: if not iterable(yerr): yerr = [yerr]*len(y) l0 = None if barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) barcols = [] caplines = [] lines_kw = {'label':'_nolegend_'} if elinewidth: lines_kw['linewidth'] = elinewidth else: if 'linewidth' in kwargs: lines_kw['linewidth']=kwargs['linewidth'] if 'lw' in kwargs: lines_kw['lw']=kwargs['lw'] if 'transform' in kwargs: lines_kw['transform'] = kwargs['transform'] # arrays fine here, they are booleans and hence not units if not iterable(lolims): lolims = np.asarray([lolims]*len(x), bool) else: lolims = np.asarray(lolims, bool) if not iterable(uplims): uplims = np.array([uplims]*len(x), bool) else: uplims = np.asarray(uplims, bool) if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool) else: xlolims = np.asarray(xlolims, bool) if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool) else: xuplims = np.asarray(xuplims, bool) def xywhere(xs, ys, mask): """ return xs[mask], ys[mask] where mask is True but xs and ys are not arrays """ assert len(xs)==len(ys) assert len(xs)==len(mask) xs = [thisx for thisx, b in zip(xs, mask) if b] ys = [thisy for thisy, b in zip(ys, mask) if b] return xs, ys if capsize > 0: plot_kw = { 'ms':2*capsize, 'label':'_nolegend_'} if 'markeredgewidth' in kwargs: plot_kw['markeredgewidth']=kwargs['markeredgewidth'] if 'mew' in kwargs: plot_kw['mew']=kwargs['mew'] if 'transform' in kwargs: plot_kw['transform'] = 
kwargs['transform'] if xerr is not None: if (iterable(xerr) and len(xerr)==2 and iterable(xerr[0]) and iterable(xerr[1])): # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[0])] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[1])] else: # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] barcols.append( self.hlines(y, left, right, **lines_kw ) ) if capsize > 0: if xlolims.any(): # can't use numpy logical indexing since left and # y are lists leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, ls='None', marker=mlines.CARETLEFT, **plot_kw) ) xlolims = ~xlolims leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) ) else: caplines.extend( self.plot(left, y, 'k|', **plot_kw) ) if xuplims.any(): rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, ls='None', marker=mlines.CARETRIGHT, **plot_kw) ) xuplims = ~xuplims rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) ) else: caplines.extend( self.plot(right, y, 'k|', **plot_kw) ) if yerr is not None: if (iterable(yerr) and len(yerr)==2 and iterable(yerr[0]) and iterable(yerr[1])): # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[0])] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[1])] else: # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] barcols.append( self.vlines(x, lower, upper, **lines_kw) ) if capsize > 0: if lolims.any(): xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, ls='None', marker=mlines.CARETDOWN, **plot_kw) ) lolims = ~lolims xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, lower, 'k_', **plot_kw) ) if uplims.any(): xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, ls='None', marker=mlines.CARETUP, **plot_kw) ) uplims = ~uplims xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, upper, 'k_', **plot_kw) ) if not barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) if ecolor is None: if l0 is None: ecolor = self._get_lines._get_next_cycle_color() else: ecolor = l0.get_color() for l in barcols: l.set_color(ecolor) for l in caplines: l.set_color(ecolor) self.autoscale_view() return (l0, caplines, barcols) errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5, positions=None, widths=None): """ call signature:: boxplot(x, notch=0, sym='+', vert=1, whis=1.5, positions=None, widths=None) Make a box and whisker plot for each column of *x* or each vector in sequence *x*. The box extends from the lower to upper quartile values of the data, with a line at the median. The whiskers extend from the box to show the range of the data. Flier points are those past the end of the whiskers. - *notch* = 0 (default) produces a rectangular box plot. 
- *notch* = 1 will produce a notched box plot *sym* (default 'b+') is the default symbol for flier points. Enter an empty string ('') if you don't want to show fliers. - *vert* = 1 (default) makes the boxes vertical. - *vert* = 0 makes horizontal boxes. This seems goofy, but that's how Matlab did it. *whis* (default 1.5) defines the length of the whiskers as a function of the inner quartile range. They extend to the most extreme data point within ( ``whis*(75%-25%)`` ) data range. *positions* (default 1,2,...,n) sets the horizontal positions of the boxes. The ticks and limits are automatically set to match the positions. *widths* is either a scalar or a vector and sets the width of each box. The default is 0.5, or ``0.15*(distance between extreme positions)`` if that is smaller. *x* is an array or a sequence of vectors. Returns a dictionary mapping each component of the boxplot to a list of the :class:`matplotlib.lines.Line2D` instances created. **Example:** .. plot:: pyplots/boxplot_demo.py """ if not self._hold: self.cla() holdStatus = self._hold whiskers, caps, boxes, medians, fliers = [], [], [], [], [] # convert x to a list of vectors if hasattr(x, 'shape'): if len(x.shape) == 1: if hasattr(x[0], 'shape'): x = list(x) else: x = [x,] elif len(x.shape) == 2: nr, nc = x.shape if nr == 1: x = [x] elif nc == 1: x = [x.ravel()] else: x = [x[:,i] for i in xrange(nc)] else: raise ValueError, "input x can have no more than 2 dimensions" if not hasattr(x[0], '__len__'): x = [x] col = len(x) # get some plot info if positions is None: positions = range(1, col + 1) if widths is None: distance = max(positions) - min(positions) widths = min(0.15*max(distance,1.0), 0.5) if isinstance(widths, float) or isinstance(widths, int): widths = np.ones((col,), float) * widths # loop through columns, adding each to plot self.hold(True) for i,pos in enumerate(positions): d = np.ravel(x[i]) row = len(d) # get median and quartiles q1, med, q3 = mlab.prctile(d,[25,50,75]) # get high extreme iq = q3 - q1 hi_val = q3 + whis*iq wisk_hi = np.compress( d <= hi_val , d ) if len(wisk_hi) == 0: wisk_hi = q3 else: wisk_hi = max(wisk_hi) # get low extreme lo_val = q1 - whis*iq wisk_lo = np.compress( d >= lo_val, d ) if len(wisk_lo) == 0: wisk_lo = q1 else: wisk_lo = min(wisk_lo) # get fliers - if we are showing them flier_hi = [] flier_lo = [] flier_hi_x = [] flier_lo_x = [] if len(sym) != 0: flier_hi = np.compress( d > wisk_hi, d ) flier_lo = np.compress( d < wisk_lo, d ) flier_hi_x = np.ones(flier_hi.shape[0]) * pos flier_lo_x = np.ones(flier_lo.shape[0]) * pos # get x locations for fliers, whisker, whisker cap and box sides box_x_min = pos - widths[i] * 0.5 box_x_max = pos + widths[i] * 0.5 wisk_x = np.ones(2) * pos cap_x_min = pos - widths[i] * 0.25 cap_x_max = pos + widths[i] * 0.25 cap_x = [cap_x_min, cap_x_max] # get y location for median med_y = [med, med] # calculate 'regular' plot if notch == 0: # make our box vectors box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ] box_y = [q1, q1, q3, q3, q1 ] # make our median line vectors med_x = [box_x_min, box_x_max] # calculate 'notch' plot else: notch_max = med + 1.57*iq/np.sqrt(row) notch_min = med - 1.57*iq/np.sqrt(row) if notch_max > q3: notch_max = q3 if notch_min < q1: notch_min = q1 # make our notched box vectors box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max, box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min, box_x_min ] box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max, med, notch_min, q1] # make our median line 
vectors med_x = [cap_x_min, cap_x_max] med_y = [med, med] # vertical or horizontal plot? if vert: def doplot(*args): return self.plot(*args) else: def doplot(*args): shuffled = [] for i in xrange(0, len(args), 3): shuffled.extend([args[i+1], args[i], args[i+2]]) return self.plot(*shuffled) whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--', wisk_x, [q3, wisk_hi], 'b--')) caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-', cap_x, [wisk_lo, wisk_lo], 'k-')) boxes.extend(doplot(box_x, box_y, 'b-')) medians.extend(doplot(med_x, med_y, 'r-')) fliers.extend(doplot(flier_hi_x, flier_hi, sym, flier_lo_x, flier_lo, sym)) # fix our axes/ticks up a little if 1 == vert: setticks, setlim = self.set_xticks, self.set_xlim else: setticks, setlim = self.set_yticks, self.set_ylim newlimits = min(positions)-0.5, max(positions)+0.5 setlim(newlimits) setticks(positions) # reset hold status self.hold(holdStatus) return dict(whiskers=whiskers, caps=caps, boxes=boxes, medians=medians, fliers=fliers) def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, faceted=True, verts=None, **kwargs): """ call signatures:: scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, verts=None, **kwargs) Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. Keyword arguments: *s*: size in points^2. It is a scalar or an array of the same length as *x* and *y*. *c*: a color. *c* can be a single color format string, or a sequence of color specifications of length *N*, or a sequence of *N* numbers to be mapped to colors using the *cmap* and *norm* specified via kwargs (see below). Note that *c* should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. *c* can be a 2-D array in which the rows are RGB or RGBA, however. *marker*: can be one of: ===== ============== Value Description ===== ============== 's' square 'o' circle '^' triangle up '>' triangle right 'v' triangle down '<' triangle left 'd' diamond 'p' pentagram 'h' hexagon '8' octagon '+' plus 'x' cross ===== ============== The marker can also be a tuple (*numsides*, *style*, *angle*), which will create a custom, regular symbol. *numsides*: the number of sides *style*: the style of the regular symbol: ===== ============================================= Value Description ===== ============================================= 0 a regular polygon 1 a star-like symbol 2 an asterisk 3 a circle (*numsides* and *angle* is ignored) ===== ============================================= *angle*: the angle of rotation of the symbol Finally, *marker* can be (*verts*, 0): *verts* is a sequence of (*x*, *y*) vertices for a custom scatter symbol. Alternatively, use the kwarg combination *marker* = *None*, *verts* = *verts*. Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which case all masks will be combined and only unmasked points will be plotted. Other keyword arguments: the color mapping and normalization arguments will be used only if *c* is an array of floats. *cmap*: [ None | Colormap ] A :class:`matplotlib.colors.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *cmap* is only used if *c* is an array of floats. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0, 1. If *None*, use the default :func:`normalize`. *norm* is only used if *c* is an array of floats. 
*vmin*/*vmax*: *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are None, the min and max of the color array *C* is used. Note if you pass a *norm* instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: 0 <= scalar <= 1 The alpha value for the patches *linewidths*: [ None | scalar | sequence ] If *None*, defaults to (lines.linewidth,). Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Optional kwargs control the :class:`~matplotlib.collections.Collection` properties; in particular: *edgecolors*: 'none' to plot faces with no outlines *facecolors*: 'none' to plot unfilled outlines Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s A :class:`~matplotlib.collections.Collection` instance is returned. """ if not self._hold: self.cla() syms = { # a dict from symbol to (numsides, angle) 's' : (4,math.pi/4.0,0), # square 'o' : (20,3,0), # circle '^' : (3,0,0), # triangle up '>' : (3,math.pi/2.0,0), # triangle right 'v' : (3,math.pi,0), # triangle down '<' : (3,3*math.pi/2.0,0), # triangle left 'd' : (4,0,0), # diamond 'p' : (5,0,0), # pentagram 'h' : (6,0,0), # hexagon '8' : (8,0,0), # octagon '+' : (4,0,2), # plus 'x' : (4,math.pi/4.0,2) # cross } self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, s, c = cbook.delete_masked_points(x, y, s, c) if is_string_like(c) or cbook.is_sequence_of_strings(c): colors = mcolors.colorConverter.to_rgba_array(c, alpha) else: sh = np.shape(c) # The inherent ambiguity is resolved in favor of color # mapping, not interpretation as rgb or rgba: if len(sh) == 1 and sh[0] == len(x): colors = None # use cmap, norm after collection is created else: colors = mcolors.colorConverter.to_rgba_array(c, alpha) if not iterable(s): scales = (s,) else: scales = s if faceted: edgecolors = None else: edgecolors = 'none' warnings.warn( '''replace "faceted=False" with "edgecolors='none'"''', DeprecationWarning) #2008/04/18 sym = None symstyle = 0 # to be API compatible if marker is None and not (verts is None): marker = (verts, 0) verts = None if is_string_like(marker): # the standard way to define symbols using a string character sym = syms.get(marker) if sym is None and verts is None: raise ValueError('Unknown marker symbol to scatter') numsides, rotation, symstyle = syms[marker] elif iterable(marker): # accept marker to be: # (numsides, style, [angle]) # or # (verts[], style, [angle]) if len(marker)<2 or len(marker)>3: raise ValueError('Cannot create markersymbol from marker') if cbook.is_numlike(marker[0]): # (numsides, style, [angle]) if len(marker)==2: numsides, rotation = marker[0], 0. 
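                # three-element tuple: (numsides, style, angle) -- an explicit
                # rotation angle was supplied as the third element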
elif len(marker)==3: numsides, rotation = marker[0], marker[2] sym = True if marker[1] in (1,2): symstyle = marker[1] else: verts = np.asarray(marker[0]) if sym is not None: if symstyle==0: collection = mcoll.RegularPolyCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==1: collection = mcoll.StarPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==2: collection = mcoll.AsteriskPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==3: collection = mcoll.CircleCollection( scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) else: rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2)) verts /= rescale collection = mcoll.PolyCollection( (verts,), scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) collection.set_transform(mtransforms.IdentityTransform()) collection.set_alpha(alpha) collection.update(kwargs) if colors is None: if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(np.asarray(c)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() temp_x = x temp_y = y minx = np.amin(temp_x) maxx = np.amax(temp_x) miny = np.amin(temp_y) maxy = np.amax(temp_y) w = maxx-minx h = maxy-miny # the pad is a little hack to deal with the fact that we don't # want to transform all the symbols whose scales are in points # to data coords to get the exact bounding box for efficiency # reasons. It can be done right if this is deemed important padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd def hexbin(self, x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none', reduce_C_function = np.mean, **kwargs): """ call signature:: hexbin(x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none' reduce_C_function = np.mean, **kwargs) Make a hexagonal binning plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. If *C* is None (the default), this is a histogram of the number of occurences of the observations at (x[i],y[i]). If *C* is specified, it specifies values at the coordinate (x[i],y[i]). These values are accumulated for each hexagonal bin and then reduced according to *reduce_C_function*, which defaults to numpy's mean function (np.mean). (If *C* is specified, it must also be a 1-D sequence of the same length as *x* and *y*.) *x*, *y* and/or *C* may be masked arrays, in which case only unmasked points will be plotted. 
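        A minimal usage sketch (illustrative only; the synthetic, seeded
        random data are an assumption made for the example, not part of this
        API)::

            import numpy as np
            import matplotlib.pyplot as plt

            np.random.seed(0)
            x = np.random.standard_normal(10000)
            y = 2.0 * x + np.random.standard_normal(10000)

            fig = plt.figure()
            ax = fig.add_subplot(111)
            polys = ax.hexbin(x, y, gridsize=30, bins='log')
            fig.colorbar(polys)   # color encodes log10(count + 1) per hexagon
            plt.show()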
Optional keyword arguments: *gridsize*: [ 100 | integer ] The number of hexagons in the *x*-direction, default is 100. The corresponding number of hexagons in the *y*-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the *x*-direction and the *y*-direction. *bins*: [ None | 'log' | integer | sequence ] If *None*, no binning is applied; the color of each hexagon directly corresponds to its count value. If 'log', use a logarithmic scale for the color map. Internally, :math:`log_{10}(i+1)` is used to determine the hexagon color. If an integer, divide the counts in the specified number of bins, and color the hexagons accordingly. If a sequence of values, the values of the lower bound of the bins to be used. *xscale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the horizontal axis. *scale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the vertical axis. Other keyword arguments controlling color mapping and normalization arguments: *cmap*: [ None | Colormap ] a :class:`matplotlib.cm.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *norm*: [ None | Normalize ] :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. *vmin*/*vmax*: scalar *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. Note if you pass a norm instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: scalar the alpha value for the patches *linewidths*: [ None | scalar ] If *None*, defaults to rc lines.linewidth. Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Other keyword arguments controlling the Collection properties: *edgecolors*: [ None | mpl color | color sequence ] If 'none', draws the edges in the same color as the fill color. This is the default, as it avoids unsightly unpainted pixels between the hexagons. If *None*, draws the outlines in the default color. If a matplotlib color arg or sequence of rgba tuples, draws the outlines in the specified color. Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s The return value is a :class:`~matplotlib.collections.PolyCollection` instance; use :meth:`~matplotlib.collection.PolyCollection.get_array` on this :class:`~matplotlib.collections.PolyCollection` to get the counts in each hexagon. **Example:** .. plot:: mpl_examples/pylab_examples/hexbin_demo.py """ if not self._hold: self.cla() self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, C = cbook.delete_masked_points(x, y, C) # Set the size of the hexagon grid if iterable(gridsize): nx, ny = gridsize else: nx = gridsize ny = int(nx/math.sqrt(3)) # Count the number of data in each hexagon x = np.array(x, float) y = np.array(y, float) if xscale=='log': x = np.log10(x) if yscale=='log': y = np.log10(y) xmin = np.amin(x) xmax = np.amax(x) ymin = np.amin(y) ymax = np.amax(y) # In the x-direction, the hexagons exactly cover the region from # xmin to xmax. Need some padding to avoid roundoff errors. 
padding = 1.e-9 * (xmax - xmin) xmin -= padding xmax += padding sx = (xmax-xmin) / nx sy = (ymax-ymin) / ny x = (x-xmin)/sx y = (y-ymin)/sy ix1 = np.round(x).astype(int) iy1 = np.round(y).astype(int) ix2 = np.floor(x).astype(int) iy2 = np.floor(y).astype(int) nx1 = nx + 1 ny1 = ny + 1 nx2 = nx ny2 = ny n = nx1*ny1+nx2*ny2 d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2 d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2 bdist = (d1<d2) if C is None: accum = np.zeros(n) # Create appropriate views into "accum" array. lattice1 = accum[:nx1*ny1] lattice2 = accum[nx1*ny1:] lattice1.shape = (nx1,ny1) lattice2.shape = (nx2,ny2) for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]]+=1 else: lattice2[ix2[i], iy2[i]]+=1 else: # create accumulation arrays lattice1 = np.empty((nx1,ny1),dtype=object) for i in xrange(nx1): for j in xrange(ny1): lattice1[i,j] = [] lattice2 = np.empty((nx2,ny2),dtype=object) for i in xrange(nx2): for j in xrange(ny2): lattice2[i,j] = [] for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]].append( C[i] ) else: lattice2[ix2[i], iy2[i]].append( C[i] ) for i in xrange(nx1): for j in xrange(ny1): vals = lattice1[i,j] if len(vals): lattice1[i,j] = reduce_C_function( vals ) else: lattice1[i,j] = np.nan for i in xrange(nx2): for j in xrange(ny2): vals = lattice2[i,j] if len(vals): lattice2[i,j] = reduce_C_function( vals ) else: lattice2[i,j] = np.nan accum = np.hstack(( lattice1.astype(float).ravel(), lattice2.astype(float).ravel())) good_idxs = ~np.isnan(accum) px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0]) py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0 polygons = np.zeros((6, n, 2), float) polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1) polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1) polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2) polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5 if C is not None: # remove accumulation bins with no data polygons = polygons[:,good_idxs,:] accum = accum[good_idxs] polygons = np.transpose(polygons, axes=[1,0,2]) polygons[:,:,0] *= sx polygons[:,:,1] *= sy polygons[:,:,0] += px polygons[:,:,1] += py if xscale=='log': polygons[:,:,0] = 10**(polygons[:,:,0]) xmin = 10**xmin xmax = 10**xmax self.set_xscale('log') if yscale=='log': polygons[:,:,1] = 10**(polygons[:,:,1]) ymin = 10**ymin ymax = 10**ymax self.set_yscale('log') if edgecolors=='none': edgecolors = 'face' collection = mcoll.PolyCollection( polygons, edgecolors = edgecolors, linewidths = linewidths, transOffset = self.transData, ) # Transform accum if needed if bins=='log': accum = np.log10(accum+1) elif bins!=None: if not iterable(bins): minimum, maximum = min(accum), max(accum) bins-=1 # one less edge than bins bins = minimum + (maximum-minimum)*np.arange(bins)/bins bins = np.sort(bins) accum = bins.searchsorted(accum) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(accum) collection.set_cmap(cmap) collection.set_norm(norm) collection.set_alpha(alpha) collection.update(kwargs) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() corners = ((xmin, ymin), (xmax, ymax)) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd def arrow(self, x, y, dx, dy, **kwargs): """ call signature:: arrow(x, y, dx, dy, **kwargs) Draws arrow on specified 
axis from (*x*, *y*) to (*x* + *dx*, *y* + *dy*). Optional kwargs control the arrow properties: %(FancyArrow)s **Example:** .. plot:: mpl_examples/pylab_examples/arrow_demo.py """ a = mpatches.FancyArrow(x, y, dx, dy, **kwargs) self.add_artist(a) return a arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd def quiverkey(self, *args, **kw): qk = mquiver.QuiverKey(*args, **kw) self.add_artist(qk) return qk quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc def quiver(self, *args, **kw): if not self._hold: self.cla() q = mquiver.Quiver(self, *args, **kw) self.add_collection(q, False) self.update_datalim(q.XY) self.autoscale_view() return q quiver.__doc__ = mquiver.Quiver.quiver_doc def barbs(self, *args, **kw): """ %(barbs_doc)s **Example:** .. plot:: mpl_examples/pylab_examples/barb_demo.py """ if not self._hold: self.cla() b = mquiver.Barbs(self, *args, **kw) self.add_collection(b) self.update_datalim(b.get_offsets()) self.autoscale_view() return b barbs.__doc__ = cbook.dedent(barbs.__doc__) % { 'barbs_doc': mquiver.Barbs.barbs_doc} def fill(self, *args, **kwargs): """ call signature:: fill(*args, **kwargs) Plot filled polygons. *args* is a variable length argument, allowing for multiple *x*, *y* pairs with an optional color format string; see :func:`~matplotlib.pyplot.plot` for details on the argument parsing. For example, to plot a polygon with vertices at *x*, *y* in blue.:: ax.fill(x,y, 'b' ) An arbitrary number of *x*, *y*, *color* groups can be specified:: ax.fill(x1, y1, 'g', x2, y2, 'r') Return value is a list of :class:`~matplotlib.patches.Patch` instances that were added. The same color strings that :func:`~matplotlib.pyplot.plot` supports are supported by the fill format string. If you would like to fill below a curve, eg. shade a region between 0 and *y* along *x*, use :meth:`fill_between` The *closed* kwarg will close the polygon when *True* (default). kwargs control the Polygon properties: %(Polygon)s **Example:** .. plot:: mpl_examples/pylab_examples/fill_demo.py """ if not self._hold: self.cla() patches = [] for poly in self._get_patches_for_fill(*args, **kwargs): self.add_patch( poly ) patches.append( poly ) self.autoscale_view() return patches fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd def fill_between(self, x, y1, y2=0, where=None, **kwargs): """ call signature:: fill_between(x, y1, y2=0, where=None, **kwargs) Create a :class:`~matplotlib.collections.PolyCollection` filling the regions between *y1* and *y2* where ``where==True`` *x* an N length np array of the x data *y1* an N length scalar or np array of the x data *y2* an N length scalar or np array of the x data *where* if None, default to fill between everywhere. If not None, it is a a N length numpy boolean array and the fill will only happen over the regions where ``where==True`` *kwargs* keyword args passed on to the :class:`PolyCollection` kwargs control the Polygon properties: %(PolyCollection)s .. 
plot:: mpl_examples/pylab_examples/fill_between.py """ # Handle united data, such as dates self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs) self._process_unit_info(ydata=y2) # Convert the arrays so we can work with them x = np.asarray(self.convert_xunits(x)) y1 = np.asarray(self.convert_yunits(y1)) y2 = np.asarray(self.convert_yunits(y2)) if not cbook.iterable(y1): y1 = np.ones_like(x)*y1 if not cbook.iterable(y2): y2 = np.ones_like(x)*y2 if where is None: where = np.ones(len(x), np.bool) where = np.asarray(where) assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where)) polys = [] for ind0, ind1 in mlab.contiguous_regions(where): theseverts = [] xslice = x[ind0:ind1] y1slice = y1[ind0:ind1] y2slice = y2[ind0:ind1] if not len(xslice): continue N = len(xslice) X = np.zeros((2*N+2, 2), np.float) # the purpose of the next two lines is for when y2 is a # scalar like 0 and we want the fill to go all the way # down to 0 even if none of the y1 sample points do X[0] = xslice[0], y2slice[0] X[N+1] = xslice[-1], y2slice[-1] X[1:N+1,0] = xslice X[1:N+1,1] = y1slice X[N+2:,0] = xslice[::-1] X[N+2:,1] = y2slice[::-1] polys.append(X) collection = mcoll.PolyCollection(polys, **kwargs) # now update the datalim and autoscale XY1 = np.array([x[where], y1[where]]).T XY2 = np.array([x[where], y2[where]]).T self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits, updatex=True, updatey=True) self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits, updatex=False, updatey=True) self.add_collection(collection) self.autoscale_view() return collection fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd #### plotting z(x,y): imshow, pcolor and relatives, contour def imshow(self, X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, **kwargs): """ call signature:: imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, **kwargs) Display the image in *X* to current axes. *X* may be a float array, a uint8 array or a PIL image. If *X* is an array, *X* can have the following shapes: * MxN -- luminance (grayscale, float array only) * MxNx3 -- RGB (float or uint8 array) * MxNx4 -- RGBA (float or uint8 array) The value for each component of MxNx3 and MxNx4 float arrays should be in the range 0.0 to 1.0; MxN float arrays may be normalised. An :class:`matplotlib.image.AxesImage` instance is returned. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet. If *None*, default to rc ``image.cmap`` value. *cmap* is ignored when *X* has RGB(A) information *aspect*: [ None | 'auto' | 'equal' | scalar ] If 'auto', changes the image aspect ratio to match that of the axes If 'equal', and *extent* is *None*, changes the axes aspect ratio to match that of the image. If *extent* is not *None*, the axes aspect ratio is changed to match that of the extent. If *None*, default to rc ``image.aspect`` value. *interpolation*: Acceptable values are *None*, 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', If *interpolation* is *None*, default to rc ``image.interpolation``. 
See also the *filternorm* and *filterrad* parameters *norm*: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance; if *None*, default is ``normalization()``. This scales luminance -> 0-1 *norm* is only used for an MxN float array. *vmin*/*vmax*: [ None | scalar ] Used to scale a luminance image to 0-1. If either is *None*, the min and max of the luminance values will be used. Note if *norm* is not *None*, the settings for *vmin* and *vmax* will be ignored. *alpha*: scalar The alpha blending value, between 0 (transparent) and 1 (opaque) *origin*: [ None | 'upper' | 'lower' ] Place the [0,0] index of the array in the upper left or lower left corner of the axes. If *None*, default to rc ``image.origin``. *extent*: [ None | scalars (left, right, bottom, top) ] Eata values of the axes. The default assigns zero-based row, column indices to the *x*, *y* centers of the pixels. *shape*: [ None | scalars (columns, rows) ] For raw buffer images *filternorm*: A parameter for the antigrain image resize filter. From the antigrain documentation, if *filternorm* = 1, the filter normalizes integer values and corrects the rounding errors. It doesn't do anything with the source floating point values, it corrects only integers according to the rule of 1.0 which means that any sum of pixel weights must be equal to 1.0. So, the filter function must produce a graph of the proper shape. *filterrad*: The filter radius for filters that have a radius parameter, i.e. when interpolation is one of: 'sinc', 'lanczos' or 'blackman' Additional kwargs are :class:`~matplotlib.artist.Artist` properties: %(Artist)s **Example:** .. plot:: mpl_examples/pylab_examples/image_demo.py """ if not self._hold: self.cla() if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) if aspect is None: aspect = rcParams['image.aspect'] self.set_aspect(aspect) im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent, filternorm=filternorm, filterrad=filterrad, resample=resample, **kwargs) im.set_data(X) im.set_alpha(alpha) self._set_artist_props(im) im.set_clip_path(self.patch) #if norm is None and shape is None: # im.set_clim(vmin, vmax) if vmin is not None or vmax is not None: im.set_clim(vmin, vmax) else: im.autoscale_None() im.set_url(url) xmin, xmax, ymin, ymax = im.get_extent() corners = (xmin, ymin), (xmax, ymax) self.update_datalim(corners) if self._autoscaleon: self.set_xlim((xmin, xmax)) self.set_ylim((ymin, ymax)) self.images.append(im) return im imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd def _pcolorargs(self, funcname, *args): if len(args)==1: C = args[0] numRows, numCols = C.shape X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) ) elif len(args)==3: X, Y, C = args else: raise TypeError( 'Illegal arguments to %s; see help(%s)' % (funcname, funcname)) Nx = X.shape[-1] Ny = Y.shape[0] if len(X.shape) <> 2 or X.shape[0] == 1: x = X.reshape(1,Nx) X = x.repeat(Ny, axis=0) if len(Y.shape) <> 2 or Y.shape[1] == 1: y = Y.reshape(Ny, 1) Y = y.repeat(Nx, axis=1) if X.shape != Y.shape: raise TypeError( 'Incompatible X, Y inputs to %s; see help(%s)' % ( funcname, funcname)) return X, Y, C def pcolor(self, *args, **kwargs): """ call signatures:: pcolor(C, **kwargs) pcolor(X, Y, C, **kwargs) Create a pseudocolor plot of a 2-D array. *C* is the array of color values. 
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at:: (X[i, j], Y[i, j]), (X[i, j+1], Y[i, j+1]), (X[i+1, j], Y[i+1, j]), (X[i+1, j+1], Y[i+1, j+1]). Ideally the dimensions of *X* and *Y* should be one greater than those of *C*; if the dimensions are the same, then the last row and column of *C* will be ignored. Note that the the column index corresponds to the *x*-coordinate, and the row index corresponds to *y*; for details, see the :ref:`Grid Orientation <axes-pcolor-grid-orientation>` section below. If either or both of *X* and *Y* are 1-D arrays or column vectors, they will be expanded as needed into the appropriate 2-D arrays, making a rectangular grid. *X*, *Y* and *C* may be masked arrays. If either C[i, j], or one of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j], [i, j+1],[i+1, j+1]) is masked, nothing is plotted. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If *None*, use rc settings. norm: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If *None*, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If *None*, the rc setting is used by default. If 'None', edges will not be visible. An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.Collection` instance. .. _axes-pcolor-grid-orientation: The grid orientation follows the Matlab(TM) convention: an array *C* with shape (*nrows*, *ncolumns*) is plotted with the column number as *X* and the row number as *Y*, increasing up; hence it is plotted the way the array would be printed, except that the *Y* axis is reversed. That is, *C* is taken as *C*(*y*, *x*). Similarly for :func:`~matplotlib.pyplot.meshgrid`:: x = np.arange(5) y = np.arange(3) X, Y = meshgrid(x,y) is equivalent to: X = array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Y = array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]]) so if you have:: C = rand( len(x), len(y)) then you need:: pcolor(X, Y, C.T) or:: pcolor(C.T) Matlab :func:`pcolor` always discards the last row and column of *C*, but matplotlib displays the last row and column if *X* and *Y* are not specified, or if *X* and *Y* have one more row and column than *C*. kwargs can be used to control the :class:`~matplotlib.collection.PolyCollection` properties: %(PolyCollection)s """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') X, Y, C = self._pcolorargs('pcolor', *args) Ny, Nx = X.shape # convert to MA, if necessary. 
C = ma.asarray(C) X = ma.asarray(X) Y = ma.asarray(Y) mask = ma.getmaskarray(X)+ma.getmaskarray(Y) xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1] # don't plot if C or any of the surrounding vertices are masked. mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask newaxis = np.newaxis compress = np.compress ravelmask = (mask==0).ravel() X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel()) Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel()) X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel()) Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel()) X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel()) Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel()) X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel()) Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel()) npoly = len(X1) xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis], X2[:,newaxis], Y2[:,newaxis], X3[:,newaxis], Y3[:,newaxis], X4[:,newaxis], Y4[:,newaxis], X1[:,newaxis], Y1[:,newaxis]), axis=1) verts = xy.reshape((npoly, 5, 2)) #verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4)) C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel()) if shading == 'faceted': edgecolors = (0,0,0,1), linewidths = (0.25,) else: edgecolors = 'face' linewidths = (1.0,) kwargs.setdefault('edgecolors', edgecolors) kwargs.setdefault('antialiaseds', (0,)) kwargs.setdefault('linewidths', linewidths) collection = mcoll.PolyCollection(verts, **kwargs) collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) x = X.compressed() y = Y.compressed() minx = np.amin(x) maxx = np.amax(x) miny = np.amin(y) maxy = np.amax(y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd def pcolormesh(self, *args, **kwargs): """ call signatures:: pcolormesh(C) pcolormesh(X, Y, C) pcolormesh(C, **kwargs) *C* may be a masked array, but *X* and *Y* may not. Masked array support is implemented via *cmap* and *norm*; in contrast, :func:`~matplotlib.pyplot.pcolor` simply does not draw quadrilaterals with masked colors or vertices. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If None, use rc settings. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If None, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If None, the rc setting is used by default. If 'None', edges will not be visible. 
An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.QuadMesh` object. kwargs can be used to control the :class:`matplotlib.collections.QuadMesh` properties: %(QuadMesh)s .. seealso:: :func:`~matplotlib.pyplot.pcolor`: For an explanation of the grid orientation and the expansion of 1-D *X* and/or *Y* to 2-D arrays. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') edgecolors = kwargs.pop('edgecolors', 'None') antialiased = kwargs.pop('antialiased', False) X, Y, C = self._pcolorargs('pcolormesh', *args) Ny, Nx = X.shape # convert to one dimensional arrays C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at # lower left corner X = X.ravel() Y = Y.ravel() coords = np.zeros(((Nx * Ny), 2), dtype=float) coords[:, 0] = X coords[:, 1] = Y if shading == 'faceted' or edgecolors != 'None': showedges = 1 else: showedges = 0 collection = mcoll.QuadMesh( Nx - 1, Ny - 1, coords, showedges, antialiased=antialiased) # kwargs are not used collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) minx = np.amin(X) maxx = np.amax(X) miny = np.amin(Y) maxy = np.amax(Y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd def pcolorfast(self, *args, **kwargs): """ pseudocolor plot of a 2-D array Experimental; this is a version of pcolor that does not draw lines, that provides the fastest possible rendering with the Agg backend, and that can handle any quadrilateral grid. Call signatures:: pcolor(C, **kwargs) pcolor(xr, yr, C, **kwargs) pcolor(x, y, C, **kwargs) pcolor(X, Y, C, **kwargs) C is the 2D array of color values corresponding to quadrilateral cells. Let (nr, nc) be its shape. C may be a masked array. ``pcolor(C, **kwargs)`` is equivalent to ``pcolor([0,nc], [0,nr], C, **kwargs)`` *xr*, *yr* specify the ranges of *x* and *y* corresponding to the rectangular region bounding *C*. If:: xr = [x0, x1] and:: yr = [y0,y1] then *x* goes from *x0* to *x1* as the second index of *C* goes from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of cell (0,0), and (*x1*, *y1*) is the outermost corner of cell (*nr*-1, *nc*-1). All cells are rectangles of the same size. This is the fastest version. *x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively, giving the x and y boundaries of the cells. Hence the cells are rectangular but the grid may be nonuniform. The speed is intermediate. (The grid is checked, and if found to be uniform the fast version is used.) *X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify the (x,y) coordinates of the corners of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at (X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]), (X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular. This is the most general, but the slowest to render. 
It may produce faster and more compact output using ps, pdf, and svg backends, however. Note that the the column index corresponds to the x-coordinate, and the row index corresponds to y; for details, see the "Grid Orientation" section below. Optional keyword arguments: *cmap*: [ None | Colormap ] A cm Colormap instance from cm. If None, use rc settings. *norm*: [ None | Normalize ] An mcolors.Normalize instance is used to scale luminance data to 0,1. If None, defaults to normalize() *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a norm instance, *vmin* and *vmax* will be *None*. *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is an image if a regular or rectangular grid is specified, and a QuadMesh collection in the general quadrilateral case. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) C = args[-1] nr, nc = C.shape if len(args) == 1: style = "image" x = [0, nc] y = [0, nr] elif len(args) == 3: x, y = args[:2] x = np.asarray(x) y = np.asarray(y) if x.ndim == 1 and y.ndim == 1: if x.size == 2 and y.size == 2: style = "image" else: dx = np.diff(x) dy = np.diff(y) if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and np.ptp(dy) < 0.01*np.abs(dy.mean())): style = "image" else: style = "pcolorimage" elif x.ndim == 2 and y.ndim == 2: style = "quadmesh" else: raise TypeError("arguments do not match valid signatures") else: raise TypeError("need 1 argument or 3 arguments") if style == "quadmesh": # convert to one dimensional arrays # This should also be moved to the QuadMesh class C = ma.ravel(C) # data point in each cell is value # at lower left corner X = x.ravel() Y = y.ravel() Nx = nc+1 Ny = nr+1 # The following needs to be cleaned up; the renderer # requires separate contiguous arrays for X and Y, # but the QuadMesh class requires the 2D array. coords = np.empty(((Nx * Ny), 2), np.float64) coords[:, 0] = X coords[:, 1] = Y # The QuadMesh class can also be changed to # handle relevant superclass kwargs; the initializer # should do much more than it does now. 
collection = mcoll.QuadMesh(nc, nr, coords, 0) collection.set_alpha(alpha) collection.set_array(C) collection.set_cmap(cmap) collection.set_norm(norm) self.add_collection(collection) xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max() ret = collection else: # One of the image styles: xl, xr, yb, yt = x[0], x[-1], y[0], y[-1] if style == "image": im = mimage.AxesImage(self, cmap, norm, interpolation='nearest', origin='lower', extent=(xl, xr, yb, yt), **kwargs) im.set_data(C) im.set_alpha(alpha) self.images.append(im) ret = im if style == "pcolorimage": im = mimage.PcolorImage(self, x, y, C, cmap=cmap, norm=norm, alpha=alpha, **kwargs) self.images.append(im) ret = im self._set_artist_props(ret) if vmin is not None or vmax is not None: ret.set_clim(vmin, vmax) else: ret.autoscale_None() self.update_datalim(np.array([[xl, yb], [xr, yt]])) self.autoscale_view(tight=True) return ret def contour(self, *args, **kwargs): if not self._hold: self.cla() kwargs['filled'] = False return mcontour.ContourSet(self, *args, **kwargs) contour.__doc__ = mcontour.ContourSet.contour_doc def contourf(self, *args, **kwargs): if not self._hold: self.cla() kwargs['filled'] = True return mcontour.ContourSet(self, *args, **kwargs) contourf.__doc__ = mcontour.ContourSet.contour_doc def clabel(self, CS, *args, **kwargs): return CS.clabel(*args, **kwargs) clabel.__doc__ = mcontour.ContourSet.clabel.__doc__ def table(self, **kwargs): """ call signature:: table(cellText=None, cellColours=None, cellLoc='right', colWidths=None, rowLabels=None, rowColours=None, rowLoc='left', colLabels=None, colColours=None, colLoc='center', loc='bottom', bbox=None): Add a table to the current axes. Returns a :class:`matplotlib.table.Table` instance. For finer grained control over tables, use the :class:`~matplotlib.table.Table` class and add it to the axes with :meth:`~matplotlib.axes.Axes.add_table`. Thanks to John Gill for providing the class and table. kwargs control the :class:`~matplotlib.table.Table` properties: %(Table)s """ return mtable.table(self, **kwargs) table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd def twinx(self): """ call signature:: ax = twinx() create a twin of Axes for generating a plot with a sharex x-axis but independent y axis. The y-axis of self will have ticks on left and the returned axes will have ticks on the right """ ax2 = self.figure.add_axes(self.get_position(True), sharex=self, frameon=False) ax2.yaxis.tick_right() ax2.yaxis.set_label_position('right') self.yaxis.tick_left() return ax2 def twiny(self): """ call signature:: ax = twiny() create a twin of Axes for generating a plot with a shared y-axis but independent x axis. 
The x-axis of self will have ticks on bottom and the returned axes will have ticks on the top """ ax2 = self.figure.add_axes(self.get_position(True), sharey=self, frameon=False) ax2.xaxis.tick_top() ax2.xaxis.set_label_position('top') self.xaxis.tick_bottom() return ax2 def get_shared_x_axes(self): 'Return a copy of the shared axes Grouper object for x axes' return self._shared_x_axes def get_shared_y_axes(self): 'Return a copy of the shared axes Grouper object for y axes' return self._shared_y_axes #### Data analysis def hist(self, x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs): """ call signature:: hist(x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs) Compute and draw the histogram of *x*. The return value is a tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*, [*patches0*, *patches1*,...]) if the input contains multiple data. Keyword arguments: *bins*: Either an integer number of bins or a sequence giving the bins. *x* are the data to be binned. *x* can be an array, a 2D array with multiple data in its columns, or a list of arrays with data of different length. Note, if *bins* is an integer input argument=numbins, *bins* + 1 bin edges will be returned, compatible with the semantics of :func:`numpy.histogram` with the *new* = True argument. Unequally spaced bins are supported if *bins* is a sequence. *range*: The lower and upper range of the bins. Lower and upper outliers are ignored. If not provided, *range* is (x.min(), x.max()). Range has no effect if *bins* is a sequence. If *bins* is a sequence or *range* is specified, autoscaling is set off (*autoscale_on* is set to *False*) and the xaxis limits are set to encompass the full specified bin range. *normed*: If *True*, the first element of the return tuple will be the counts normalized to form a probability density, i.e., ``n/(len(x)*dbin)``. In a probability density, the integral of the histogram should be 1; you can verify that with a trapezoidal integration of the probability density function:: pdf, bins, patches = ax.hist(...) print np.sum(pdf * np.diff(bins)) *cumulative*: If *True*, then a histogram is computed where each bin gives the counts in that bin plus all bins for smaller values. The last bin gives the total number of datapoints. If *normed* is also *True* then the histogram is normalized such that the last bin equals 1. If *cumulative* evaluates to less than 0 (e.g. -1), the direction of accumulation is reversed. In this case, if *normed* is also *True*, then the histogram is normalized such that the first bin equals 1. *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ] The type of histogram to draw. - 'bar' is a traditional bar-type histogram. If multiple data are given the bars are aranged side by side. - 'barstacked' is a bar-type histogram where multiple data are stacked on top of each other. - 'step' generates a lineplot that is by default unfilled. - 'stepfilled' generates a lineplot that is by default filled. *align*: ['left' | 'mid' | 'right' ] Controls how the histogram is plotted. - 'left': bars are centered on the left bin edges. - 'mid': bars are centered between the bin edges. - 'right': bars are centered on the right bin edges. 
*orientation*: [ 'horizontal' | 'vertical' ] If 'horizontal', :func:`~matplotlib.pyplot.barh` will be used for bar-type histograms and the *bottom* kwarg will be the left edges. *rwidth*: The relative width of the bars as a fraction of the bin width. If *None*, automatically compute the width. Ignored if *histtype* = 'step' or 'stepfilled'. *log*: If *True*, the histogram axis will be set to a log scale. If *log* is *True* and *x* is a 1D array, empty bins will be filtered out and only the non-empty (*n*, *bins*, *patches*) will be returned. kwargs are used to update the properties of the hist :class:`~matplotlib.patches.Rectangle` instances: %(Rectangle)s You can use labels for your histogram, and only the first :class:`~matplotlib.patches.Rectangle` gets the label (the others get the magic string '_nolegend_'. This will make the histograms work in the intuitive way for bar charts:: ax.hist(10+2*np.random.randn(1000), label='men') ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5) ax.legend() **Example:** .. plot:: mpl_examples/pylab_examples/histogram_demo.py """ if not self._hold: self.cla() # NOTE: the range keyword overwrites the built-in func range !!! # needs to be fixed in with numpy !!! if kwargs.get('width') is not None: raise DeprecationWarning( 'hist now uses the rwidth to give relative width ' 'and not absolute width') try: # make sure a copy is created: don't use asarray x = np.transpose(np.array(x)) if len(x.shape)==1: x.shape = (1,x.shape[0]) elif len(x.shape)==2 and x.shape[1]<x.shape[0]: warnings.warn('2D hist should be nsamples x nvariables; ' 'this looks transposed') except ValueError: # multiple hist with data of different length if iterable(x[0]) and not is_string_like(x[0]): tx = [] for i in xrange(len(x)): tx.append( np.array(x[i]) ) x = tx else: raise ValueError, 'Can not use providet data to create a histogram' # Check whether bins or range are given explicitly. In that # case do not autoscale axes. 
binsgiven = (cbook.iterable(bins) or range != None) # check the version of the numpy if np.__version__ < "1.3": # version 1.1 and 1.2 hist_kwargs = dict(range=range, normed=bool(normed), new=True) else: # version 1.3 and later, drop new=True hist_kwargs = dict(range=range, normed=bool(normed)) n = [] for i in xrange(len(x)): # this will automatically overwrite bins, # so that each histogram uses the same bins m, bins = np.histogram(x[i], bins, **hist_kwargs) n.append(m) if cumulative: slc = slice(None) if cbook.is_numlike(cumulative) and cumulative < 0: slc = slice(None,None,-1) if normed: n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n] else: n = [m[slc].cumsum()[slc] for m in n] patches = [] if histtype.startswith('bar'): totwidth = np.diff(bins) stacked = False if rwidth is not None: dr = min(1., max(0., rwidth)) elif len(n)>1: dr = 0.8 else: dr = 1.0 if histtype=='bar': width = dr*totwidth/len(n) dw = width if len(n)>1: boffset = -0.5*dr*totwidth*(1.-1./len(n)) else: boffset = 0.0 elif histtype=='barstacked': width = dr*totwidth boffset, dw = 0.0, 0.0 stacked = True else: raise ValueError, 'invalid histtype: %s' % histtype if align == 'mid' or align == 'edge': boffset += 0.5*totwidth elif align == 'right': boffset += totwidth elif align != 'left' and align != 'center': raise ValueError, 'invalid align: %s' % align if orientation == 'horizontal': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.barh(bins[:-1]+boffset, m, height=width, left=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw elif orientation == 'vertical': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.bar(bins[:-1]+boffset, m, width=width, bottom=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw else: raise ValueError, 'invalid orientation: %s' % orientation elif histtype.startswith('step'): x = np.zeros( 2*len(bins), np.float ) y = np.zeros( 2*len(bins), np.float ) x[0::2], x[1::2] = bins, bins if align == 'left' or align == 'center': x -= 0.5*(bins[1]-bins[0]) elif align == 'right': x += 0.5*(bins[1]-bins[0]) elif align != 'mid' and align != 'edge': raise ValueError, 'invalid align: %s' % align if log: y[0],y[-1] = 1e-100, 1e-100 if orientation == 'horizontal': self.set_xscale('log') elif orientation == 'vertical': self.set_yscale('log') fill = False if histtype == 'stepfilled': fill = True elif histtype != 'step': raise ValueError, 'invalid histtype: %s' % histtype for m in n: y[1:-1:2], y[2::2] = m, m if orientation == 'horizontal': x,y = y,x elif orientation != 'vertical': raise ValueError, 'invalid orientation: %s' % orientation color = self._get_lines._get_next_cycle_color() if fill: patches.append( self.fill(x, y, closed=False, facecolor=color) ) else: patches.append( self.fill(x, y, closed=False, edgecolor=color, fill=False) ) # adopted from adjust_x/ylim part of the bar method if orientation == 'horizontal': xmin, xmax = 0, self.dataLim.intervalx[1] for m in n: xmin = np.amin(m[m!=0]) # filter out the 0 height bins xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) elif orientation == 'vertical': ymin, ymax = 0, self.dataLim.intervaly[1] for m in n: ymin = np.amin(m[m!=0]) # filter out the 0 height bins ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() else: raise ValueError, 'invalid histtype: %s' % histtype label = 
kwargs.pop('label', '') for patch in patches: for p in patch: p.update(kwargs) p.set_label(label) label = '_nolegend_' if binsgiven: self.set_autoscale_on(False) if orientation == 'vertical': self.autoscale_view(scalex=False, scaley=True) XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_xbound(XL) else: self.autoscale_view(scalex=True, scaley=False) YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_ybound(YL) if len(n)==1: return n[0], bins, cbook.silent_list('Patch', patches[0]) else: return n, bins, cbook.silent_list('Lists of Patches', patches) hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The power spectral density by Welch's average periodogram method. The vector *x* is divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. *noverlap* gives the length of the overlap between segments. The :math:`|\mathrm{fft}(i)|^2` of each segment :math:`i` are averaged to compute *Pxx*, with a scaling to correct for power loss due to windowing. *Fs* is the sampling frequency. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns the tuple (*Pxx*, *freqs*). For plotting, the power is plotted as :math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself is returned. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/psd_demo.py """ if not self._hold: self.cla() pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxx.shape = len(freqs), freqs += Fc if scale_by_freq in (None, True): psd_units = 'dB/Hz' else: psd_units = 'dB' self.plot(freqs, 10*np.log10(pxx), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Power Spectral Density (%s)' % psd_units) self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin logi = int(np.log10(intv)) if logi==0: logi=.1 step = 10*logi #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1 ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxx, freqs psd_doc_dict = dict() psd_doc_dict.update(martist.kwdocd) psd_doc_dict.update(mlab.kwdocd) psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD']) psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The cross spectral density :math:`P_{xy}` by Welch's average periodogram method. The vectors *x* and *y* are divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. 
The product of the direct FFTs of *x* and *y* are averaged over each segment to compute :math:`P_{xy}`, with a scaling to correct for power loss due to windowing. Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum (complex valued), and :math:`10\log_{10}|P_{xy}|` is plotted. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the Line2D properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/csd_demo.py .. seealso: :meth:`psd` For a description of the optional parameters. """ if not self._hold: self.cla() pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxy.shape = len(freqs), # pxy is complex freqs += Fc self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Cross Spectrum Magnitude (dB)') self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin step = 10*int(np.log10(intv)) ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxy, freqs csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none, window = mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) cohere the coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. The return value is a tuple (*Cxy*, *f*), where *f* are the frequencies of the coherence vector. kwargs are applied to the lines. References: * Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties of the coherence plot: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/cohere_demo.py """ if not self._hold: self.cla() cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap, scale_by_freq) freqs += Fc self.plot(freqs, cxy, **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Coherence') self.grid(True) return cxy, freqs cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None): """ call signature:: specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None) Compute a spectrogram of data in *x*. Data are split into *NFFT* length segments and the PSD of each section is computed. The windowing function *window* is applied to each segment, and the amount of overlap of each segment is specified with *noverlap*. 
%(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the y extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. *cmap*: A :class:`matplotlib.cm.Colormap` instance; if *None* use default determined by rc *xextent*: The image extent along the x-axis. xextent = (xmin,xmax) The default is (0,max(bins)), where bins is the return value from :func:`mlab.specgram` Return value is (*Pxx*, *freqs*, *bins*, *im*): - *bins* are the time points the spectrogram is calculated over - *freqs* is an array of frequencies - *Pxx* is a len(times) x len(freqs) array of power - *im* is a :class:`matplotlib.image.AxesImage` instance Note: If *x* is real (i.e. non-complex), only the positive spectrum is shown. If *x* is complex, both positive and negative parts of the spectrum are shown. This can be overridden using the *sides* keyword argument. **Example:** .. plot:: mpl_examples/pylab_examples/specgram_demo.py """ if not self._hold: self.cla() Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Z = 10. * np.log10(Pxx) Z = np.flipud(Z) if xextent is None: xextent = 0, np.amax(bins) xmin, xmax = xextent freqs += Fc extent = xmin, xmax, freqs[0], freqs[-1] im = self.imshow(Z, cmap, extent=extent) self.axis('auto') return Pxx, freqs, bins, im specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict del psd_doc_dict #So that this does not become an Axes attribute def spy(self, Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs): """ call signature:: spy(Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs) ``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*. If *precision* is 0, any non-zero value will be plotted; else, values of :math:`|Z| > precision` will be plotted. For :class:`scipy.sparse.spmatrix` instances, there is a special case: if *precision* is 'present', any value present in the array will be plotted, even if it is identically zero. The array will be plotted as it would be printed, with the first index (row) increasing down and the second index (column) increasing to the right. By default aspect is 'equal', so that each array element occupies a square space; set the aspect kwarg to 'auto' to allow the plot to fill the plot box, or to any scalar number to specify the aspect ratio of an array element directly. Two plotting styles are available: image or marker. Both are available for full arrays, but only the marker style works for :class:`scipy.sparse.spmatrix` instances. If *marker* and *markersize* are *None*, an image will be returned and any remaining kwargs are passed to :func:`~matplotlib.pyplot.imshow`; else, a :class:`~matplotlib.lines.Line2D` object will be returned with the value of marker determining the marker type, and any remaining kwargs passed to the :meth:`~matplotlib.axes.Axes.plot` method. If *marker* and *markersize* are *None*, useful kwargs include: * *cmap* * *alpha* .. seealso:: :func:`~matplotlib.pyplot.imshow` For controlling colors, e.g. cyan background and red marks, use:: cmap = mcolors.ListedColormap(['c','r']) If *marker* or *markersize* is not *None*, useful kwargs include: * *marker* * *markersize* * *color* Useful values for *marker* include: * 's' square (default) * 'o' circle * '.' point * ',' pixel .. 
seealso:: :func:`~matplotlib.pyplot.plot` """ if precision is None: precision = 0 warnings.DeprecationWarning("Use precision=0 instead of None") # 2008/10/03 if marker is None and markersize is None and hasattr(Z, 'tocoo'): marker = 's' if marker is None and markersize is None: Z = np.asarray(Z) mask = np.absolute(Z)>precision if 'cmap' not in kwargs: kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'], name='binary') nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] ret = self.imshow(mask, interpolation='nearest', aspect=aspect, extent=extent, origin='upper', **kwargs) else: if hasattr(Z, 'tocoo'): c = Z.tocoo() if precision == 'present': y = c.row x = c.col else: nonzero = np.absolute(c.data) > precision y = c.row[nonzero] x = c.col[nonzero] else: Z = np.asarray(Z) nonzero = np.absolute(Z)>precision y, x = np.nonzero(nonzero) if marker is None: marker = 's' if markersize is None: markersize = 10 marks = mlines.Line2D(x, y, linestyle='None', marker=marker, markersize=markersize, **kwargs) self.add_line(marks) nr, nc = Z.shape self.set_xlim(xmin=-0.5, xmax=nc-0.5) self.set_ylim(ymin=nr-0.5, ymax=-0.5) self.set_aspect(aspect) ret = marks self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return ret def matshow(self, Z, **kwargs): ''' Plot a matrix or array as an image. The matrix will be shown the way it would be printed, with the first row at the top. Row and column numbering is zero-based. Argument: *Z* anything that can be interpreted as a 2-D array kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`. :meth:`matshow` sets defaults for *extent*, *origin*, *interpolation*, and *aspect*; use care in overriding the *extent* and *origin* kwargs, because they interact. (Also, if you want to change them, you probably should be using imshow directly in your own version of matshow.) Returns: an :class:`matplotlib.image.AxesImage` instance. ''' Z = np.asarray(Z) nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] kw = {'extent': extent, 'origin': 'upper', 'interpolation': 'nearest', 'aspect': 'equal'} # (already the imshow default) kw.update(kwargs) im = self.imshow(Z, **kw) self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return im class SubplotBase: """ Base class for subplots, which are :class:`Axes` instances with additional methods to facilitate generating and manipulating a set of :class:`Axes` within a figure. """ def __init__(self, fig, *args, **kwargs): """ *fig* is a :class:`matplotlib.figure.Figure` instance. *args* is the tuple (*numRows*, *numCols*, *plotNum*), where the array of subplots in the figure has dimensions *numRows*, *numCols*, and where *plotNum* is the number of the subplot being created. *plotNum* starts at 1 in the upper left corner and increases to the right. If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*. 
""" self.figure = fig if len(args)==1: s = str(args[0]) if len(s) != 3: raise ValueError('Argument to subplot must be a 3 digits long') rows, cols, num = map(int, s) elif len(args)==3: rows, cols, num = args else: raise ValueError( 'Illegal argument to subplot') total = rows*cols num -= 1 # convert from matlab to python indexing # ie num in range(0,total) if num >= total: raise ValueError( 'Subplot number exceeds total subplots') self._rows = rows self._cols = cols self._num = num self.update_params() # _axes_class is set in the subplot_class_factory self._axes_class.__init__(self, fig, self.figbox, **kwargs) def get_geometry(self): 'get the subplot geometry, eg 2,2,3' return self._rows, self._cols, self._num+1 # COVERAGE NOTE: Never used internally or from examples def change_geometry(self, numrows, numcols, num): 'change subplot geometry, eg. from 1,1,1 to 2,2,3' self._rows = numrows self._cols = numcols self._num = num-1 self.update_params() self.set_position(self.figbox) def update_params(self): 'update the subplot position from fig.subplotpars' rows = self._rows cols = self._cols num = self._num pars = self.figure.subplotpars left = pars.left right = pars.right bottom = pars.bottom top = pars.top wspace = pars.wspace hspace = pars.hspace totWidth = right-left totHeight = top-bottom figH = totHeight/(rows + hspace*(rows-1)) sepH = hspace*figH figW = totWidth/(cols + wspace*(cols-1)) sepW = wspace*figW rowNum, colNum = divmod(num, cols) figBottom = top - (rowNum+1)*figH - rowNum*sepH figLeft = left + colNum*(figW + sepW) self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom, figW, figH) self.rowNum = rowNum self.colNum = colNum self.numRows = rows self.numCols = cols if 0: print 'rcn', rows, cols, num print 'lbrt', left, bottom, right, top print 'self.figBottom', self.figBottom print 'self.figLeft', self.figLeft print 'self.figW', self.figW print 'self.figH', self.figH print 'self.rowNum', self.rowNum print 'self.colNum', self.colNum print 'self.numRows', self.numRows print 'self.numCols', self.numCols def is_first_col(self): return self.colNum==0 def is_first_row(self): return self.rowNum==0 def is_last_row(self): return self.rowNum==self.numRows-1 def is_last_col(self): return self.colNum==self.numCols-1 # COVERAGE NOTE: Never used internally or from examples def label_outer(self): """ set the visible property on ticklabels so xticklabels are visible only if the subplot is in the last row and yticklabels are visible only if the subplot is in the first column """ lastrow = self.is_last_row() firstcol = self.is_first_col() for label in self.get_xticklabels(): label.set_visible(lastrow) for label in self.get_yticklabels(): label.set_visible(firstcol) _subplot_classes = {} def subplot_class_factory(axes_class=None): # This makes a new class that inherits from SubclassBase and the # given axes_class (which is assumed to be a subclass of Axes). # This is perhaps a little bit roundabout to make a new class on # the fly like this, but it means that a new Subplot class does # not have to be created for every type of Axes. 
if axes_class is None: axes_class = Axes new_class = _subplot_classes.get(axes_class) if new_class is None: new_class = new.classobj("%sSubplot" % (axes_class.__name__), (SubplotBase, axes_class), {'_axes_class': axes_class}) _subplot_classes[axes_class] = new_class return new_class # This is provided for backward compatibility Subplot = subplot_class_factory() martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes) """ # this is some discarded code I was using to find the minimum positive # data point for some log scaling fixes. I realized there was a # cleaner way to do it, but am keeping this around as an example for # how to get the data out of the axes. Might want to make something # like this a method one day, or better yet make get_verts an Artist # method minx, maxx = self.get_xlim() if minx<=0 or maxx<=0: # find the min pos value in the data xs = [] for line in self.lines: xs.extend(line.get_xdata(orig=False)) for patch in self.patches: xs.extend([x for x,y in patch.get_verts()]) for collection in self.collections: xs.extend([x for x,y in collection.get_verts()]) posx = [x for x in xs if x>0] if len(posx): minx = min(posx) maxx = max(posx) # warning, probably breaks inverted axis self.set_xlim((0.1*minx, maxx)) """
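A short usage sketch can make the Axes.hist and Axes.psd behaviour documented in the source above concrete. The snippet below is not part of that file; it is a minimal illustration written against the public pyplot API, and the sample data (two Gaussian samples, a noisy 50 Hz tone) is invented for the example.

import numpy as np
import matplotlib.pyplot as plt

# Minimal sketch, not part of the library source above: exercises the hist()
# and psd() methods whose docstrings appear there, on made-up data.
rng = np.random.RandomState(0)
fig, (ax1, ax2) = plt.subplots(2, 1)

# hist(): only the first Rectangle of each dataset receives the label
ax1.hist(10 + 2 * rng.randn(1000), bins=30, label='men')
ax1.hist(12 + 3 * rng.randn(1000), bins=30, alpha=0.5, label='women')
ax1.legend()

# psd(): Welch's average periodogram of a noisy 50 Hz tone, plotted in dB/Hz
t = np.arange(0, 2, 1.0 / 500)
x = np.sin(2 * np.pi * 50 * t) + 0.5 * rng.randn(t.size)
ax2.psd(x, NFFT=256, Fs=500)

plt.show()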
agpl-3.0
petteyg/intellij-community
python/helpers/pydev/pydev_ipython/matplotlibtools.py
52
5401
import sys backends = {'tk': 'TkAgg', 'gtk': 'GTKAgg', 'wx': 'WXAgg', 'qt': 'Qt4Agg', # qt3 not supported 'qt4': 'Qt4Agg', 'osx': 'MacOSX'} # We also need a reverse backends2guis mapping that will properly choose which # GUI support to activate based on the desired matplotlib backend. For the # most part it's just a reverse of the above dict, but we also need to add a # few others that map to the same GUI manually: backend2gui = dict(zip(backends.values(), backends.keys())) backend2gui['Qt4Agg'] = 'qt' # In the reverse mapping, there are a few extra valid matplotlib backends that # map to the same GUI support backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk' backend2gui['WX'] = 'wx' backend2gui['CocoaAgg'] = 'osx' def do_enable_gui(guiname): from pydev_versioncheck import versionok_for_gui if versionok_for_gui(): try: from pydev_ipython.inputhook import enable_gui enable_gui(guiname) except: sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname) import traceback traceback.print_exc() elif guiname not in ['none', '', None]: # Only print a warning if the guiname was going to do something sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname) # Return value does not matter, so return back what was sent return guiname def find_gui_and_backend(): """Return the gui and mpl backend.""" matplotlib = sys.modules['matplotlib'] # WARNING: this assumes matplotlib 1.1 or newer!! backend = matplotlib.rcParams['backend'] # In this case, we need to find what the appropriate gui selection call # should be for IPython, so we can activate inputhook accordingly gui = backend2gui.get(backend, None) return gui, backend def is_interactive_backend(backend): """ Check if backend is interactive """ matplotlib = sys.modules['matplotlib'] from matplotlib.rcsetup import interactive_bk, non_interactive_bk if backend in interactive_bk: return True elif backend in non_interactive_bk: return False else: return matplotlib.is_interactive() def patch_use(enable_gui_function): """ Patch matplotlib function 'use' """ matplotlib = sys.modules['matplotlib'] def patched_use(*args, **kwargs): matplotlib.real_use(*args, **kwargs) gui, backend = find_gui_and_backend() enable_gui_function(gui) setattr(matplotlib, "real_use", getattr(matplotlib, "use")) setattr(matplotlib, "use", patched_use) def patch_is_interactive(): """ Patch matplotlib function 'use' """ matplotlib = sys.modules['matplotlib'] def patched_is_interactive(): return matplotlib.rcParams['interactive'] setattr(matplotlib, "real_is_interactive", getattr(matplotlib, "is_interactive")) setattr(matplotlib, "is_interactive", patched_is_interactive) def activate_matplotlib(enable_gui_function): """Set interactive to True for interactive backends. enable_gui_function - Function which enables gui, should be run in the main thread. """ matplotlib = sys.modules['matplotlib'] gui, backend = find_gui_and_backend() is_interactive = is_interactive_backend(backend) if is_interactive: enable_gui_function(gui) if not matplotlib.is_interactive(): sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend) matplotlib.interactive(True) else: if matplotlib.is_interactive(): sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend) matplotlib.interactive(False) patch_use(enable_gui_function) patch_is_interactive() def flag_calls(func): """Wrap a function to detect and flag when it gets called. 
This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded.""" # don't wrap twice if hasattr(func, 'called'): return func def wrapper(*args,**kw): wrapper.called = False out = func(*args,**kw) wrapper.called = True return out wrapper.called = False wrapper.__doc__ = func.__doc__ return wrapper def activate_pylab(): pylab = sys.modules['pylab'] pylab.show._needmain = False # We need to detect at runtime whether show() is called by the user. # For this, we wrap it into a decorator which adds a 'called' flag. pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive) def activate_pyplot(): pyplot = sys.modules['matplotlib.pyplot'] pyplot.show._needmain = False # We need to detect at runtime whether show() is called by the user. # For this, we wrap it into a decorator which adds a 'called' flag. pyplot.draw_if_interactive = flag_calls(pyplot.draw_if_interactive)
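The module above only defines the patching helpers; how they fit together is easiest to see in a small sketch. The function below is my own illustration of a plausible call order, not part of pydev, and it reuses only names defined in the file above: do_enable_gui, activate_matplotlib, activate_pylab and activate_pyplot.

import sys

def _activate_plotting_support():
    # Hypothetical wiring, not part of the module above: call this once the
    # debugged program has imported matplotlib.
    if 'matplotlib' not in sys.modules:
        return
    activate_matplotlib(do_enable_gui)   # patch use()/is_interactive(), enable the GUI loop
    if 'pylab' in sys.modules:
        activate_pylab()                 # flag calls to pylab.draw_if_interactive
    if 'matplotlib.pyplot' in sys.modules:
        activate_pyplot()                # flag calls to pyplot.draw_if_interactive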
apache-2.0
hewittc/proxmark3lcd
fpga/tests/plot_edgedetect.py
14
1553
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (C) 2014 iZsh <izsh at fail0verflow.com>
#
# This code is licensed to you under the terms of the GNU GPL, version 2 or,
# at your option, any later version. See the LICENSE.txt file for the text of
# the license.
#-----------------------------------------------------------------------------
import numpy
import matplotlib.pyplot as plt
import sys

if len(sys.argv) != 2:
    print "Usage: %s <basename>" % sys.argv[0]
    sys.exit(1)

BASENAME = sys.argv[1]

nx = numpy.fromfile(BASENAME + ".time")

def plot_time(dat1):
    plt.plot(nx, dat1)

sig = open(BASENAME + ".filtered").read()
sig = map(lambda x: ord(x), sig)
min_vals = open(BASENAME + ".min").read()
min_vals = map(lambda x: ord(x), min_vals)
max_vals = open(BASENAME + ".max").read()
max_vals = map(lambda x: ord(x), max_vals)
states = open(BASENAME + ".state").read()
states = map(lambda x: ord(x) * 10 + 65, states)
toggles = open(BASENAME + ".toggle").read()
toggles = map(lambda x: ord(x) * 10 + 80, toggles)
high = open(BASENAME + ".high").read()
high = map(lambda x: ord(x), high)
highz = open(BASENAME + ".highz").read()
highz = map(lambda x: ord(x), highz)
lowz = open(BASENAME + ".lowz").read()
lowz = map(lambda x: ord(x), lowz)
low = open(BASENAME + ".low").read()
low = map(lambda x: ord(x), low)

plot_time(sig)
plot_time(min_vals)
plot_time(max_vals)
plot_time(states)
plot_time(toggles)
plot_time(high)
plot_time(highz)
plot_time(lowz)
plot_time(low)

plt.show()
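The plotting script above expects a family of raw dump files produced by the FPGA testbench. The snippet below is not part of the repository; it is a minimal sketch, under my own assumptions about sample count and waveform shape, that writes synthetic "<basename>.*" files so the script can be exercised without real simulation output.

import numpy as np

# Sketch only: the "demo" basename, sample count and waveforms are invented.
base = "demo"
n = 256
t = np.linspace(0.0, 1.0, n)
t.tofile(base + ".time")  # raw float64, matching numpy.fromfile's default dtype

# One raw byte per sample for the signals the script reads back with ord()
sig = (100 + 80 * np.sin(2 * np.pi * 5 * t)).astype(np.uint8)
for suffix in ("filtered", "min", "max", "high", "highz", "lowz", "low"):
    with open(base + "." + suffix, "wb") as f:
        f.write(sig.tobytes())

# 0/1 flags; the plotting script rescales these with *10 + offset
flags = (t > 0.5).astype(np.uint8)
for suffix in ("state", "toggle"):
    with open(base + "." + suffix, "wb") as f:
        f.write(flags.tobytes())

With these files in place, the plotting script can be invoked with the basename (here "demo") as its single argument.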
gpl-2.0
wazeerzulfikar/scikit-learn
benchmarks/bench_mnist.py
45
6977
""" ======================= MNIST dataset benchmark ======================= Benchmark on the MNIST dataset. The dataset comprises 70,000 samples and 784 features. Here, we consider the task of predicting 10 classes - digits from 0 to 9 from their raw images. By contrast to the covertype dataset, the feature space is homogenous. Example of output : [..] Classification performance: =========================== Classifier train-time test-time error-rate ------------------------------------------------------------ MLP_adam 53.46s 0.11s 0.0224 Nystroem-SVM 112.97s 0.92s 0.0228 MultilayerPerceptron 24.33s 0.14s 0.0287 ExtraTrees 42.99s 0.57s 0.0294 RandomForest 42.70s 0.49s 0.0318 SampledRBF-SVM 135.81s 0.56s 0.0486 LinearRegression-SAG 16.67s 0.06s 0.0824 CART 20.69s 0.02s 0.1219 dummy 0.00s 0.01s 0.8973 """ from __future__ import division, print_function # Author: Issam H. Laradji # Arnaud Joly <[email protected]> # License: BSD 3 clause import os from time import time import argparse import numpy as np from sklearn.datasets import fetch_mldata from sklearn.datasets import get_data_home from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.dummy import DummyClassifier from sklearn.externals.joblib import Memory from sklearn.kernel_approximation import Nystroem from sklearn.kernel_approximation import RBFSampler from sklearn.metrics import zero_one_loss from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils import check_array from sklearn.linear_model import LogisticRegression from sklearn.neural_network import MLPClassifier # Memoize the data extraction and memory map the resulting # train / test splits in readonly mode memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'), mmap_mode='r') @memory.cache def load_data(dtype=np.float32, order='F'): """Load the data, then cache and memmap the train/test split""" ###################################################################### # Load dataset print("Loading dataset...") data = fetch_mldata('MNIST original') X = check_array(data['data'], dtype=dtype, order=order) y = data["target"] # Normalize features X = X / 255 # Create train-test split (as [Joachims, 2006]) print("Creating train-test split...") n_train = 60000 X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] return X_train, X_test, y_train, y_test ESTIMATORS = { "dummy": DummyClassifier(), 'CART': DecisionTreeClassifier(), 'ExtraTrees': ExtraTreesClassifier(n_estimators=100), 'RandomForest': RandomForestClassifier(n_estimators=100), 'Nystroem-SVM': make_pipeline( Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'SampledRBF-SVM': make_pipeline( RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4), 'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1, C=1e4), 'MultilayerPerceptron': MLPClassifier( hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4, solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1, tol=1e-4, random_state=1), 'MLP-adam': MLPClassifier( hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4, solver='adam', learning_rate_init=0.001, verbose=1, tol=1e-4, random_state=1) } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--classifiers', nargs="+", choices=ESTIMATORS, type=str, default=['ExtraTrees', 'Nystroem-SVM'], 
help="list of classifiers to benchmark.") parser.add_argument('--n-jobs', nargs="?", default=1, type=int, help="Number of concurrently running workers for " "models that support parallelism.") parser.add_argument('--order', nargs="?", default="C", type=str, choices=["F", "C"], help="Allow to choose between fortran and C ordered " "data") parser.add_argument('--random-seed', nargs="?", default=0, type=int, help="Common seed used by random number generator.") args = vars(parser.parse_args()) print(__doc__) X_train, X_test, y_train, y_test = load_data(order=args["order"]) print("") print("Dataset statistics:") print("===================") print("%s %d" % ("number of features:".ljust(25), X_train.shape[1])) print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size)) print("%s %s" % ("data type:".ljust(25), X_train.dtype)) print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25), X_train.shape[0], int(X_train.nbytes / 1e6))) print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25), X_test.shape[0], int(X_test.nbytes / 1e6))) print() print("Training Classifiers") print("====================") error, train_time, test_time = {}, {}, {} for name in sorted(args["classifiers"]): print("Training %s ... " % name, end="") estimator = ESTIMATORS[name] estimator_params = estimator.get_params() estimator.set_params(**{p: args["random_seed"] for p in estimator_params if p.endswith("random_state")}) if "n_jobs" in estimator_params: estimator.set_params(n_jobs=args["n_jobs"]) time_start = time() estimator.fit(X_train, y_train) train_time[name] = time() - time_start time_start = time() y_pred = estimator.predict(X_test) test_time[name] = time() - time_start error[name] = zero_one_loss(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print("{0: <24} {1: >10} {2: >11} {3: >12}" "".format("Classifier ", "train-time", "test-time", "error-rate")) print("-" * 60) for name in sorted(args["classifiers"], key=error.get): print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}" "".format(name, train_time[name], test_time[name], error[name])) print()
bsd-3-clause
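The benchmark above reduces to a fit/predict timing loop per estimator. Below is a minimal sketch of that pattern, assuming the small built-in digits dataset as a stand-in for MNIST (the mldata.org service behind fetch_mldata is no longer available), so the timings and error rate are only illustrative.

from time import time

from sklearn.datasets import load_digits
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import zero_one_loss
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X = X / 16.0  # scale pixel intensities to [0, 1], mirroring X / 255 above
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8,
                                                    random_state=0)

clf = ExtraTreesClassifier(n_estimators=100, random_state=0)

time_start = time()
clf.fit(X_train, y_train)
train_time = time() - time_start

time_start = time()
y_pred = clf.predict(X_test)
test_time = time() - time_start

# one row in the same format as the benchmark's results table
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}".format(
    "ExtraTrees", train_time, test_time, zero_one_loss(y_test, y_pred)))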
mehdidc/scikit-learn
examples/applications/plot_stock_market.py
29
8284
""" ======================================= Visualizing the stock market structure ======================================= This example employs several unsupervised learning techniques to extract the stock market structure from variations in historical quotes. The quantity that we use is the daily variation in quote price: quotes that are linked tend to cofluctuate during a day. .. _stock_market: Learning a graph structure -------------------------- We use sparse inverse covariance estimation to find which quotes are correlated conditionally on the others. Specifically, sparse inverse covariance gives us a graph, that is a list of connection. For each symbol, the symbols that it is connected too are those useful to explain its fluctuations. Clustering ---------- We use clustering to group together quotes that behave similarly. Here, amongst the :ref:`various clustering techniques <clustering>` available in the scikit-learn, we use :ref:`affinity_propagation` as it does not enforce equal-size clusters, and it can choose automatically the number of clusters from the data. Note that this gives us a different indication than the graph, as the graph reflects conditional relations between variables, while the clustering reflects marginal properties: variables clustered together can be considered as having a similar impact at the level of the full stock market. Embedding in 2D space --------------------- For visualization purposes, we need to lay out the different symbols on a 2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D embedding. Visualization ------------- The output of the 3 models are combined in a 2D graph where nodes represents the stocks and edges the: - cluster labels are used to define the color of the nodes - the sparse covariance model is used to display the strength of the edges - the 2D embedding is used to position the nodes in the plan This example has a fair amount of visualization-related code, as visualization is crucial here to display the graph. One of the challenge is to position the labels minimizing overlap. For this we use an heuristic based on the direction of the nearest neighbor along each axis. 
""" print(__doc__) # Author: Gael Varoquaux [email protected] # License: BSD 3 clause import datetime import numpy as np import matplotlib.pyplot as plt from matplotlib import finance from matplotlib.collections import LineCollection from sklearn import cluster, covariance, manifold ############################################################################### # Retrieve the data from Internet # Choose a time period reasonnably calm (not too long ago so that we get # high-tech firms, and before the 2008 crash) d1 = datetime.datetime(2003, 1, 1) d2 = datetime.datetime(2008, 1, 1) # kraft symbol has now changed from KFT to MDLZ in yahoo symbol_dict = { 'TOT': 'Total', 'XOM': 'Exxon', 'CVX': 'Chevron', 'COP': 'ConocoPhillips', 'VLO': 'Valero Energy', 'MSFT': 'Microsoft', 'IBM': 'IBM', 'TWX': 'Time Warner', 'CMCSA': 'Comcast', 'CVC': 'Cablevision', 'YHOO': 'Yahoo', 'DELL': 'Dell', 'HPQ': 'HP', 'AMZN': 'Amazon', 'TM': 'Toyota', 'CAJ': 'Canon', 'MTU': 'Mitsubishi', 'SNE': 'Sony', 'F': 'Ford', 'HMC': 'Honda', 'NAV': 'Navistar', 'NOC': 'Northrop Grumman', 'BA': 'Boeing', 'KO': 'Coca Cola', 'MMM': '3M', 'MCD': 'Mc Donalds', 'PEP': 'Pepsi', 'MDLZ': 'Kraft Foods', 'K': 'Kellogg', 'UN': 'Unilever', 'MAR': 'Marriott', 'PG': 'Procter Gamble', 'CL': 'Colgate-Palmolive', 'GE': 'General Electrics', 'WFC': 'Wells Fargo', 'JPM': 'JPMorgan Chase', 'AIG': 'AIG', 'AXP': 'American express', 'BAC': 'Bank of America', 'GS': 'Goldman Sachs', 'AAPL': 'Apple', 'SAP': 'SAP', 'CSCO': 'Cisco', 'TXN': 'Texas instruments', 'XRX': 'Xerox', 'LMT': 'Lookheed Martin', 'WMT': 'Wal-Mart', 'WAG': 'Walgreen', 'HD': 'Home Depot', 'GSK': 'GlaxoSmithKline', 'PFE': 'Pfizer', 'SNY': 'Sanofi-Aventis', 'NVS': 'Novartis', 'KMB': 'Kimberly-Clark', 'R': 'Ryder', 'GD': 'General Dynamics', 'RTN': 'Raytheon', 'CVS': 'CVS', 'CAT': 'Caterpillar', 'DD': 'DuPont de Nemours'} symbols, names = np.array(list(symbol_dict.items())).T quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True) for symbol in symbols] open = np.array([q.open for q in quotes]).astype(np.float) close = np.array([q.close for q in quotes]).astype(np.float) # The daily variations of the quotes are what carry most information variation = close - open ############################################################################### # Learn a graphical structure from the correlations edge_model = covariance.GraphLassoCV() # standardize the time series: using correlations rather than covariance # is more efficient for structure recovery X = variation.copy().T X /= X.std(axis=0) edge_model.fit(X) ############################################################################### # Cluster using affinity propagation _, labels = cluster.affinity_propagation(edge_model.covariance_) n_labels = labels.max() for i in range(n_labels + 1): print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i]))) ############################################################################### # Find a low-dimension embedding for visualization: find the best position of # the nodes (the stocks) on a 2D plane # We use a dense eigen_solver to achieve reproducibility (arpack is # initiated with random vectors that we don't control). In addition, we # use a large number of neighbors to capture the large-scale structure. 
node_position_model = manifold.LocallyLinearEmbedding( n_components=2, eigen_solver='dense', n_neighbors=6) embedding = node_position_model.fit_transform(X.T).T ############################################################################### # Visualization plt.figure(1, facecolor='w', figsize=(10, 8)) plt.clf() ax = plt.axes([0., 0., 1., 1.]) plt.axis('off') # Display a graph of the partial correlations partial_correlations = edge_model.precision_.copy() d = 1 / np.sqrt(np.diag(partial_correlations)) partial_correlations *= d partial_correlations *= d[:, np.newaxis] non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02) # Plot the nodes using the coordinates of our embedding plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=plt.cm.spectral) # Plot the edges start_idx, end_idx = np.where(non_zero) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[embedding[:, start], embedding[:, stop]] for start, stop in zip(start_idx, end_idx)] values = np.abs(partial_correlations[non_zero]) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, .7 * values.max())) lc.set_array(values) lc.set_linewidths(15 * values) ax.add_collection(lc) # Add a label to each node. The challenge here is that we want to # position the labels to avoid overlap with other labels for index, (name, label, (x, y)) in enumerate( zip(names, labels, embedding.T)): dx = x - embedding[0] dx[index] = 1 dy = y - embedding[1] dy[index] = 1 this_dx = dx[np.argmin(np.abs(dy))] this_dy = dy[np.argmin(np.abs(dx))] if this_dx > 0: horizontalalignment = 'left' x = x + .002 else: horizontalalignment = 'right' x = x - .002 if this_dy > 0: verticalalignment = 'bottom' y = y + .002 else: verticalalignment = 'top' y = y - .002 plt.text(x, y, name, size=10, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, bbox=dict(facecolor='w', edgecolor=plt.cm.spectral(label / float(n_labels)), alpha=.6)) plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(), embedding[0].max() + .10 * embedding[0].ptp(),) plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(), embedding[1].max() + .03 * embedding[1].ptp()) plt.show()
bsd-3-clause
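The core of the example above is the pairing of sparse inverse covariance estimation with affinity propagation. The sketch below runs that pipeline on synthetic "daily variation" data, since the Yahoo Finance download used above is no longer reachable; GraphLassoCV matches the scikit-learn version this file targets and was later renamed GraphicalLassoCV, so treat the class name as version-dependent.

import numpy as np
from sklearn import cluster, covariance

# Synthetic "daily variations": 5 series driven by a common factor plus noise,
# and 5 independent series.
rng = np.random.RandomState(0)
base = rng.normal(size=(300, 1))
X = np.hstack([base + 0.1 * rng.normal(size=(300, 5)),
               rng.normal(size=(300, 5))])
X /= X.std(axis=0)  # standardize, as in the example above

edge_model = covariance.GraphLassoCV()  # GraphicalLassoCV in recent releases
edge_model.fit(X)

# Partial correlations, rescaled from the estimated precision matrix
prec = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(prec))
partial_correlations = prec * d * d[:, np.newaxis]
print(np.round(partial_correlations[:5, :5], 2))

# Cluster on the estimated covariance, as above
_, labels = cluster.affinity_propagation(edge_model.covariance_)
for i in range(labels.max() + 1):
    print('Cluster %i: %s' % (i + 1, np.where(labels == i)[0]))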
klfranco/klfranco.github.io
markdown_generator/publications.py
197
3887
# coding: utf-8 # # Publications markdown generator for academicpages # # Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either the notebook or the script from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format. # # TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style. # # ## Data format # # The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top. # # - `excerpt` and `paper_url` can be blank, but the others must have values. # - `pub_date` must be formatted as YYYY-MM-DD. # - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]` # ## Import pandas # # We are using the very handy pandas library for dataframes. # In[2]: import pandas as pd # ## Import TSV # # Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`. # # I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others. # In[3]: publications = pd.read_csv("publications.tsv", sep="\t", header=0) publications # ## Escape special characters # # YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely. # In[4]: html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;" } def html_escape(text): """Produce entities within text.""" return "".join(html_escape_table.get(c,c) for c in text) # ## Creating the markdown files # # This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. 
If you don't want something to appear (like the "Recommended citation"), you can delete or comment out the corresponding line in the loop below. # In[5]: import os for row, item in publications.iterrows(): md_filename = str(item.pub_date) + "-" + item.url_slug + ".md" html_filename = str(item.pub_date) + "-" + item.url_slug year = item.pub_date[:4] ## YAML variables md = "---\ntitle: \"" + item.title + '"\n' md += """collection: publications""" md += """\npermalink: /publication/""" + html_filename if len(str(item.excerpt)) > 5: md += "\nexcerpt: '" + html_escape(item.excerpt) + "'" md += "\ndate: " + str(item.pub_date) md += "\nvenue: '" + html_escape(item.venue) + "'" if len(str(item.paper_url)) > 5: md += "\npaperurl: '" + item.paper_url + "'" md += "\ncitation: '" + html_escape(item.citation) + "'" md += "\n---" ## Markdown description for individual page if len(str(item.paper_url)) > 5: md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n" if len(str(item.excerpt)) > 5: md += "\n" + html_escape(item.excerpt) + "\n" md += "\nRecommended citation: " + item.citation md_filename = os.path.basename(md_filename) with open("../_publications/" + md_filename, 'w') as f: f.write(md)
mit
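As a sketch of the TSV-row-to-markdown conversion described above, the snippet below builds the YAML front matter and page body for one made-up publication entirely in memory; every field value is hypothetical and nothing is written to disk.

# Illustrative only: assemble the markdown for one hypothetical publication row,
# following the YAML front-matter layout used above.
html_escape_table = {"&": "&amp;", '"': "&quot;", "'": "&apos;"}

def html_escape(text):
    return "".join(html_escape_table.get(c, c) for c in text)

item = {  # made-up values standing in for one TSV row
    "pub_date": "2017-01-01",
    "url_slug": "example-paper",
    "title": "An Example Paper",
    "venue": "Journal of Examples",
    "excerpt": "A short summary of the paper.",
    "citation": 'Doe, J. (2017). "An Example Paper." Journal of Examples.',
    "paper_url": "http://example.com/paper.pdf",
}

html_filename = item["pub_date"] + "-" + item["url_slug"]
md = "---\ntitle: \"" + item["title"] + "\"\n"
md += "collection: publications\n"
md += "permalink: /publication/" + html_filename + "\n"
md += "excerpt: '" + html_escape(item["excerpt"]) + "'\n"
md += "date: " + item["pub_date"] + "\n"
md += "venue: '" + html_escape(item["venue"]) + "'\n"
md += "paperurl: '" + item["paper_url"] + "'\n"
md += "citation: '" + html_escape(item["citation"]) + "'\n"
md += "---\n\n"
md += "<a href='" + item["paper_url"] + "'>Download paper here</a>\n\n"
md += "Recommended citation: " + item["citation"] + "\n"

print(md)  # the script above writes this string to ../_publications/<date>-<slug>.md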
pdebuyl-lab/RMPCDMD
experiments/03-single-janus/rotation_analysis.py
2
2482
#!/usr/bin/env python """ Analyze the rotational motion of a L-shaped colloid. """ from __future__ import print_function, division import numpy as np import h5py import argparse parser = argparse.ArgumentParser() parser.add_argument('file') parser.add_argument('--plot', action='store_true') parser.add_argument('--nx', type=int, default=5, help='number of beads along the bottom of the L') parser.add_argument('--ny', type=int, default=8, help='number of beads along the long arm of the L') parser.add_argument('--n-tiles', type=int, default=2, help='number of tiles for the thickness of the bottom arm') parser.add_argument('--arm-width', type=int, default=2, help='width of long arm') args = parser.parse_args() with h5py.File(args.file, 'r') as a: edges = a['particles/janus/box/edges'][:] pos = a['particles/janus/position/value'][:] im = a['particles/janus/image/value'][:] vel = a['particles/janus/velocity/value'][:] pos_com = a['observables/janus_pos/value'][:] vel_com = vel.mean(axis=1) pos = pos + im*edges.reshape((1,1,-1)) n_planar = args.nx * args.n_tiles + args.arm_width * (args.ny - args.n_tiles) print(n_planar) one_z = pos[:,n_planar,:] - pos[:,0,:] one_z = one_z / np.sqrt(np.sum(one_z**2, axis=1)).reshape((-1,1)) v12 = vel[:,n_planar-args.arm_width,:] - vel[:,0,:] v12_inplane = v12 - np.sum(v12*one_z, axis=1).reshape((-1,1))*one_z off_in = np.sum(v12_inplane*one_z, axis=1) r12 = pos[:,n_planar-args.arm_width,:] - pos[:,0,:] dist12 = np.sqrt(np.sum((pos[0,n_planar-args.arm_width,:]-pos[0,0,:])**2)) r12 /= dist12 one_y = np.cross(one_z, r12) omega_z = np.sum(v12_inplane*one_y, axis=1)/dist12 dir_v = np.sum(r12*vel_com, axis=1) oz_mean = np.mean(omega_z) print('mean omega_z', oz_mean) print('mean directed velocity', dir_v.mean()) print('rotation radius', dir_v.mean()/oz_mean) if args.plot: import matplotlib.pyplot as plt plt.figure() plt.subplot(221) plt.ylabel('directed velocity') plt.plot(dir_v) plt.subplot(222) plt.hist(dir_v, bins=32, normed=True) plt.axvline(dir_v.mean(), color='red') plt.subplot(223) plt.plot(omega_z) plt.ylabel('rotational velocity') plt.subplot(224) plt.hist(omega_z, bins=32, normed=True) plt.axvline(oz_mean, color='red') plt.figure() x, y, z = pos_com.T plt.plot(x, y) plt.xlabel('x') plt.ylabel('y') plt.show()
bsd-3-clause
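The angular-velocity estimate in the script above projects the relative velocity of two beads onto the in-plane direction perpendicular to their separation and divides by the separation. A small self-check on synthetic, rigidly rotating data (not the HDF5 layout used above) illustrates the idea; the recovered omega_z should match the imposed value of 0.3.

import numpy as np

# Two beads rotating rigidly about z at angular velocity omega; the same
# projection as in the script above should recover omega.
omega = 0.3
radius = 2.0
t = np.linspace(0, 10, 200)
pos1 = np.stack([radius*np.cos(omega*t), radius*np.sin(omega*t),
                 np.zeros_like(t)], axis=1)
pos0 = np.zeros_like(pos1)                        # reference bead on the axis
vel1 = np.stack([-radius*omega*np.sin(omega*t), radius*omega*np.cos(omega*t),
                 np.zeros_like(t)], axis=1)
vel0 = np.zeros_like(vel1)

one_z = np.array([0.0, 0.0, 1.0])                 # body z axis (fixed here)
v12 = vel1 - vel0
v12_inplane = v12 - np.sum(v12*one_z, axis=1).reshape((-1, 1))*one_z

r12 = pos1 - pos0
dist12 = np.sqrt(np.sum((pos1[0] - pos0[0])**2))  # constant separation
r12 = r12 / dist12
one_y = np.cross(one_z, r12)                      # in-plane direction normal to r12

omega_z = np.sum(v12_inplane*one_y, axis=1)/dist12
print('mean omega_z', omega_z.mean())             # should be close to 0.3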
yinquan529/platform-external-chromium_org
ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py
154
8545
#!/usr/bin/python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import subprocess import sys import tempfile import time script_dir = os.path.dirname(__file__) sys.path.append(os.path.join(script_dir, '../../tools/browser_tester')) import browser_tester import browsertester.browserlauncher # This script extends browser_tester to check for the presence of # Breakpad crash dumps. # This reads a file of lines containing 'key:value' pairs. # The file contains entries like the following: # plat:Win32 # prod:Chromium # ptype:nacl-loader # rept:crash svc def ReadDumpTxtFile(filename): dump_info = {} fh = open(filename, 'r') for line in fh: if ':' in line: key, value = line.rstrip().split(':', 1) dump_info[key] = value fh.close() return dump_info def StartCrashService(browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, crash_service_exe, skip_if_missing=False): # Find crash_service.exe relative to chrome.exe. This is a bit icky. browser_dir = os.path.dirname(browser_path) crash_service_path = os.path.join(browser_dir, crash_service_exe) if skip_if_missing and not os.path.exists(crash_service_path): return proc = subprocess.Popen([crash_service_path, '--v=1', # Verbose output for debugging failures '--dumps-dir=%s' % dumps_dir, '--pipe-name=%s' % windows_pipe_name]) def Cleanup(): # Note that if the process has already exited, this will raise # an 'Access is denied' WindowsError exception, but # crash_service.exe is not supposed to do this and such # behaviour should make the test fail. proc.terminate() status = proc.wait() sys.stdout.write('crash_dump_tester: %s exited with status %s\n' % (crash_service_exe, status)) cleanup_funcs.append(Cleanup) def ListPathsInDir(dir_path): if os.path.exists(dir_path): return [os.path.join(dir_path, name) for name in os.listdir(dir_path)] else: return [] def GetDumpFiles(dumps_dirs): all_files = [filename for dumps_dir in dumps_dirs for filename in ListPathsInDir(dumps_dir)] sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files)) for dump_file in all_files: sys.stdout.write(' %s (size %i)\n' % (dump_file, os.stat(dump_file).st_size)) return [dump_file for dump_file in all_files if dump_file.endswith('.dmp')] def Main(cleanup_funcs): parser = browser_tester.BuildArgParser() parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps', type=int, default=0, help='The number of crash dumps that we should expect') parser.add_option('--expected_process_type_for_crash', dest='expected_process_type_for_crash', type=str, default='nacl-loader', help='The type of Chromium process that we expect the ' 'crash dump to be for') # Ideally we would just query the OS here to find out whether we are # running x86-32 or x86-64 Windows, but Python's win32api module # does not contain a wrapper for GetNativeSystemInfo(), which is # what NaCl uses to check this, or for IsWow64Process(), which is # what Chromium uses. Instead, we just rely on the build system to # tell us. parser.add_option('--win64', dest='win64', action='store_true', help='Pass this if we are running tests for x86-64 Windows') options, args = parser.parse_args() temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_') def CleanUpTempDir(): browsertester.browserlauncher.RemoveDirectory(temp_dir) cleanup_funcs.append(CleanUpTempDir) # To get a guaranteed unique pipe name, use the base name of the # directory we just created. 
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir) # This environment variable enables Breakpad crash dumping in # non-official builds of Chromium. os.environ['CHROME_HEADLESS'] = '1' if sys.platform == 'win32': dumps_dir = temp_dir # Override the default (global) Windows pipe name that Chromium will # use for out-of-process crash reporting. os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name # Launch the x86-32 crash service so that we can handle crashes in # the browser process. StartCrashService(options.browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, 'crash_service.exe') if options.win64: # Launch the x86-64 crash service so that we can handle crashes # in the NaCl loader process (nacl64.exe). # Skip if missing, since in win64 builds crash_service.exe is 64-bit # and crash_service64.exe does not exist. StartCrashService(options.browser_path, dumps_dir, windows_pipe_name, cleanup_funcs, 'crash_service64.exe', skip_if_missing=True) # We add a delay because there is probably a race condition: # crash_service.exe might not have finished doing # CreateNamedPipe() before NaCl does a crash dump and tries to # connect to that pipe. # TODO(mseaborn): We could change crash_service.exe to report when # it has successfully created the named pipe. time.sleep(1) elif sys.platform == 'darwin': dumps_dir = temp_dir os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir elif sys.platform.startswith('linux'): # The "--user-data-dir" option is not effective for the Breakpad # setup in Linux Chromium, because Breakpad is initialized before # "--user-data-dir" is read. So we set HOME to redirect the crash # dumps to a temporary directory. home_dir = temp_dir os.environ['HOME'] = home_dir options.enable_crash_reporter = True result = browser_tester.Run(options.url, options) # Find crash dump results. if sys.platform.startswith('linux'): # Look in "~/.config/*/Crash Reports". This will find crash # reports under ~/.config/chromium or ~/.config/google-chrome, or # under other subdirectories in case the branding is changed. dumps_dirs = [os.path.join(path, 'Crash Reports') for path in ListPathsInDir(os.path.join(home_dir, '.config'))] else: dumps_dirs = [dumps_dir] dmp_files = GetDumpFiles(dumps_dirs) failed = False msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' % (len(dmp_files), options.expected_crash_dumps)) if len(dmp_files) != options.expected_crash_dumps: sys.stdout.write(msg) failed = True for dump_file in dmp_files: # Sanity check: Make sure dumping did not fail after opening the file. msg = 'crash_dump_tester: ERROR: Dump file is empty\n' if os.stat(dump_file).st_size == 0: sys.stdout.write(msg) failed = True # On Windows, the crash dumps should come in pairs of a .dmp and # .txt file. if sys.platform == 'win32': second_file = dump_file[:-4] + '.txt' msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding ' '%r file\n' % (dump_file, second_file)) if not os.path.exists(second_file): sys.stdout.write(msg) failed = True continue # Check that the crash dump comes from the NaCl process. 
dump_info = ReadDumpTxtFile(second_file) if 'ptype' in dump_info: msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n' % (dump_info['ptype'], options.expected_process_type_for_crash)) if dump_info['ptype'] != options.expected_process_type_for_crash: sys.stdout.write(msg) failed = True else: sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n') failed = True # TODO(mseaborn): Ideally we would also check that a backtrace # containing an expected function name can be extracted from the # crash dump. if failed: sys.stdout.write('crash_dump_tester: FAILED\n') result = 1 else: sys.stdout.write('crash_dump_tester: PASSED\n') return result def MainWrapper(): cleanup_funcs = [] try: return Main(cleanup_funcs) finally: for func in cleanup_funcs: func() if __name__ == '__main__': sys.exit(MainWrapper())
bsd-3-clause
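The .dmp/.txt pairing above relies on a simple 'key:value' parser. Here is a stand-alone sketch of that parsing and of the ptype check, run on a made-up in-memory sample rather than a real crash-dump .txt file.

# Sketch of the 'key:value' parsing done by ReadDumpTxtFile, plus the ptype
# check, using an in-memory sample instead of a file on disk.
sample = ("plat:Win32\n"
          "prod:Chromium\n"
          "ptype:nacl-loader\n"
          "rept:crash svc\n")

def read_dump_info(lines):
    dump_info = {}
    for line in lines:
        if ':' in line:
            key, value = line.rstrip().split(':', 1)
            dump_info[key] = value
    return dump_info

info = read_dump_info(sample.splitlines())
expected_ptype = 'nacl-loader'
if info.get('ptype') != expected_ptype:
    print('ERROR: Unexpected ptype value: %r != %r'
          % (info.get('ptype'), expected_ptype))
else:
    print('ptype check passed: %r' % info['ptype'])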
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/pandas/core/indexes/api.py
7
4180
from pandas.core.indexes.base import (Index, _new_Index, # noqa _ensure_index, _get_na_value, InvalidIndexError) from pandas.core.indexes.category import CategoricalIndex # noqa from pandas.core.indexes.multi import MultiIndex # noqa from pandas.core.indexes.interval import IntervalIndex # noqa from pandas.core.indexes.numeric import (NumericIndex, Float64Index, # noqa Int64Index, UInt64Index) from pandas.core.indexes.range import RangeIndex # noqa from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.datetimes import DatetimeIndex import pandas.core.common as com from pandas._libs import lib from pandas._libs.tslib import NaT # TODO: there are many places that rely on these private methods existing in # pandas.core.index __all__ = ['Index', 'MultiIndex', 'NumericIndex', 'Float64Index', 'Int64Index', 'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'UInt64Index', 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', '_ensure_index', '_get_na_value', '_get_combined_index', '_get_distinct_indexes', '_union_indexes', '_get_consensus_names', '_all_indexes_same'] def _get_combined_index(indexes, intersect=False): # TODO: handle index names! indexes = _get_distinct_indexes(indexes) if len(indexes) == 0: return Index([]) if len(indexes) == 1: return indexes[0] if intersect: index = indexes[0] for other in indexes[1:]: index = index.intersection(other) return index union = _union_indexes(indexes) return _ensure_index(union) def _get_distinct_indexes(indexes): return list(dict((id(x), x) for x in indexes).values()) def _union_indexes(indexes): if len(indexes) == 0: raise AssertionError('Must have at least 1 Index to union') if len(indexes) == 1: result = indexes[0] if isinstance(result, list): result = Index(sorted(result)) return result indexes, kind = _sanitize_and_check(indexes) def _unique_indices(inds): def conv(i): if isinstance(i, Index): i = i.tolist() return i return Index(lib.fast_unique_multiple_list([conv(i) for i in inds])) if kind == 'special': result = indexes[0] if hasattr(result, 'union_many'): return result.union_many(indexes[1:]) else: for other in indexes[1:]: result = result.union(other) return result elif kind == 'array': index = indexes[0] for other in indexes[1:]: if not index.equals(other): return _unique_indices(indexes) name = _get_consensus_names(indexes)[0] if name != index.name: index = index._shallow_copy(name=name) return index else: return _unique_indices(indexes) def _sanitize_and_check(indexes): kinds = list(set([type(index) for index in indexes])) if list in kinds: if len(kinds) > 1: indexes = [Index(com._try_sort(x)) if not isinstance(x, Index) else x for x in indexes] kinds.remove(list) else: return indexes, 'list' if len(kinds) > 1 or Index not in kinds: return indexes, 'special' else: return indexes, 'array' def _get_consensus_names(indexes): # find the non-none names, need to tupleify to make # the set hashable, then reverse on return consensus_names = set([tuple(i.names) for i in indexes if any(n is not None for n in i.names)]) if len(consensus_names) == 1: return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels def _all_indexes_same(indexes): first = indexes[0] for index in indexes[1:]: if not first.equals(index): return False return True
mit
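_get_combined_index above either intersects the distinct indexes pairwise or unions them. The same behaviour can be sketched with the public pandas Index API; the printed reprs are indicative and depend on the pandas version.

import pandas as pd

a = pd.Index([1, 2, 3, 4])
b = pd.Index([3, 4, 5, 6])
c = pd.Index([4, 5, 6, 7])

# intersect=True path: successive pairwise intersections
common = a
for other in (b, c):
    common = common.intersection(other)
print(common)   # an Index containing only 4

# default path: union of all the distinct indexes
union = a
for other in (b, c):
    union = union.union(other)
print(union)    # an Index containing 1 through 7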
bthirion/scikit-learn
sklearn/feature_extraction/text.py
2
52194
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Lars Buitinck # Robert Layton <[email protected]> # Jochen Wersdörfer <[email protected]> # Roman Sinayev <[email protected]> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ from __future__ import unicode_literals import array from collections import Mapping, defaultdict import numbers from operator import itemgetter import re import unicodedata import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.six.moves import xrange from ..preprocessing import normalize from .hashing import FeatureHasher from .stop_words import ENGLISH_STOP_WORDS from ..utils.fixes import frombuffer_empty, bincount from ..utils.validation import check_is_fitted __all__ = ['CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. See also -------- strip_accents_ascii Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. """ normalized = unicodedata.normalize('NFKD', s) if normalized == s: return s else: return ''.join([c for c in normalized if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. See also -------- strip_accents_unicode Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None else: # assume it's a collection return frozenset(stop) class VectorizerMixin(object): """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. 
""" if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or " "unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens tokens = [] n_original_tokens = len(original_tokens) for n in xrange(min_n, min(max_n + 1, n_original_tokens + 1)): for i in xrange(n_original_tokens - n + 1): tokens.append(" ".join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) ngrams = [] min_n, max_n = self.ngram_range for n in xrange(min_n, min(max_n + 1, text_len + 1)): for i in xrange(text_len - n + 1): ngrams.append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams operating only inside word boundaries. n-grams at the edges of words are padded with space.""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in xrange(min_n, max_n + 1): offset = 0 ngrams.append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams.append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" if self.preprocessor is not None: return self.preprocessor # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the cost of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) if self.lowercase: return lambda x: strip_accents(x.lower()) else: return strip_accents def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens""" if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) return lambda doc: token_pattern.findall(doc) def get_stop_words(self): """Build or fetch the effective stop words list""" return _check_stop_list(self.stop_words) def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if self.analyzer == 'char': return lambda doc: self._char_ngrams(preprocess(self.decode(doc))) elif self.analyzer == 'char_wb': return lambda doc: self._char_wb_ngrams( preprocess(self.decode(doc))) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return lambda doc: self._word_ngrams( tokenize(preprocess(self.decode(doc))), stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _validate_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if isinstance(vocabulary, set): vocabulary = sorted(vocabulary) if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(six.itervalues(vocabulary)) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in xrange(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False def _check_vocabulary(self): """Check if vocabulary is empty or missing (not fit-ed)""" msg = "%(name)s - Vocabulary wasn't fitted." check_is_fitted(self, 'vocabulary_', msg=msg), if len(self.vocabulary_) == 0: raise ValueError("Vocabulary is empty") class HashingVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. 
There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. lowercase : boolean, default=True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). n_features : integer, default=(2 ** 20) The number of features (columns) in the output matrices. 
Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. binary : boolean, default=False. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). non_negative : boolean, default=False Whether output matrices should contain non-negative values only; effectively calls abs on the matrix prior to returning it. When True, output values can be interpreted as frequencies. When False, output values will have expected value zero. See also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', non_negative=False, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.non_negative = non_negative self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless.""" # triggers a parameter validation if isinstance(X, six.string_types): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._get_hasher().fit(X, y=y) return self def transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : (ignored) Returns ------- X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix. """ if isinstance(X, six.string_types): raise ValueError( "Iterable over raw text documents expected, " "string object received.") analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X # Alias transform to fit_transform for convenience fit_transform = transform def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, non_negative=self.non_negative) def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return bincount(X.indices, minlength=X.shape[1]) else: return np.diff(sp.csc_matrix(X, copy=False).indptr) class CountVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.csr_matrix. 
If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, True by default Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. 
min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : boolean, default=False If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- HashingVectorizer, TfidfVectorizer Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. """ def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df or min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for new_val, (term, old_val) in enumerate(sorted_features): vocabulary[term] = new_val map_index[old_val] = new_val X.indices = map_index.take(X.indices, mode='clip') return X def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. 
Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. """ if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = [] indptr = _make_int_array() values = _make_int_array() indptr.append(0) for doc in raw_documents: feature_counter = {} for feature in analyze(doc): try: feature_idx = vocabulary[feature] if feature_idx not in feature_counter: feature_counter[feature_idx] = 1 else: feature_counter[feature_idx] += 1 except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue j_indices.extend(feature_counter.keys()) values.extend(feature_counter.values()) indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") j_indices = np.asarray(j_indices, dtype=np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) values = frombuffer_empty(values, dtype=np.intc) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sort_indices() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array, [n_samples, n_features] Document-term matrix. """ # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. 
if isinstance(raw_documents, six.string_types): raise ValueError( "Iterable over raw text documents expected, " "string object received.") self._validate_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix, [n_samples, n_features] Document-term matrix. """ if isinstance(raw_documents, six.string_types): raise ValueError( "Iterable over raw text documents expected, " "string object received.") if not hasattr(self, 'vocabulary_'): self._validate_vocabulary() self._check_vocabulary() # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array, sparse matrix}, shape = [n_samples, n_features] Returns ------- X_inv : list of arrays, len = n_samples List of arrays of terms. """ self._check_vocabulary() if sp.issparse(X): # We need CSR format for fast row manipulations. X = X.tocsr() else: # We need to convert X to a matrix, so that the indexing # returns 2D objects X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name""" self._check_vocabulary() return [t for t, i in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))] def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(BaseEstimator, TransformerMixin): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. 
The formula that is used to compute the tf-idf of term t is tf-idf(d, t) = tf(t) * idf(d, t), and the idf is computed as idf(d, t) = log [ n / df(d, t) ] + 1 (if ``smooth_idf=False``), where n is the total number of documents and df(d, t) is the document frequency; the document frequency is the number of documents d that contain term t. The effect of adding "1" to the idf in the equation above is that terms with zero idf, i.e., terms that occur in all documents in a training set, will not be entirely ignored. (Note that the idf formula above differs from the standard textbook notation that defines the idf as idf(d, t) = log [ n / (df(d, t) + 1) ]). If ``smooth_idf=True`` (the default), the constant "1" is added to the numerator and denominator of the idf as if an extra document was seen containing every term in the collection exactly once, which prevents zero divisions: idf(d, t) = log [ (1 + n) / (1 + df(d, t)) ] + 1. Furthermore, the formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR as follows: Tf is "n" (natural) by default, "l" (logarithmic) when ``sublinear_tf=True``. Idf is "t" when use_idf is given, "n" (none) otherwise. Normalization is "c" (cosine) when ``norm='l2'``, "n" (none) when ``norm=None``. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). References ---------- .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74.` .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schütze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120.` """ def __init__(self, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights) Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts """ if not sp.issparse(X): X = sp.csc_matrix(X) if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log+1 instead of log makes sure terms with zero idf don't get # suppressed entirely. idf = np.log(float(n_samples) / df) + 1.0 self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features, format='csr') return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. 
Returns ------- vectors : sparse matrix, [n_samples, n_features] """ if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float): # preserve float family dtype X = sp.csr_matrix(X, copy=copy) else: # convert counts or binary occurrences to floats X = sp.csr_matrix(X, dtype=np.float64, copy=copy) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: check_is_fitted(self, '_idf_diag', 'idf vector is not fitted') expected_n_features = self._idf_diag.shape[0] if n_features != expected_n_features: raise ValueError("Input has n_features=%d while the model" " has been trained with n_features=%d" % ( n_features, expected_n_features)) # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): # if _idf_diag is not set, this will raise an attribute error, # which means hasattr(self, "idf_") is False return np.ravel(self._idf_diag.sum(axis=0)) class TfidfVectorizer(CountVectorizer): """Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer. Read more in the :ref:`User Guide <text_feature_extraction>`. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char'} or callable Whether the feature should be made of word or character n-grams. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. 
max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, default True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : int or None, default=None If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. binary : boolean, default=False If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs.) dtype : type, optional Type of the matrix returned by fit_transform() or transform(). norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, default=True Enable inverse-document-frequency reweighting. smooth_idf : boolean, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- vocabulary_ : dict A mapping of terms to feature indices. idf_ : array, shape = [n_features], or None The learned idf vector (global term weights) when ``use_idf`` is set to True, None otherwise. stop_words_ : set Terms that were ignored because they either: - occurred in too many documents (`max_df`) - occurred in too few documents (`min_df`) - were cut off by feature selection (`max_features`). This is only available if no vocabulary was given. See also -------- CountVectorizer Tokenize the documents and count the occurrences of token and return them as a sparse matrix TfidfTransformer Apply Term Frequency Inverse Document Frequency normalization to a sparse matrix of occurrence counts. Notes ----- The ``stop_words_`` attribute can get large and increase the model size when pickling. This attribute is provided only for introspection and can be safely removed using delattr or set to None before pickling. 
""" def __init__(self, input='content', encoding='utf-8', decode_error='strict', strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super(TfidfVectorizer, self).__init__( input=input, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- self : TfidfVectorizer """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents, copy=True): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects copy : boolean, default True Whether to copy X and operate on the copy or perform in-place operations. Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted') X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
bsd-3-clause
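The vectorizer source above documents both the two-step CountVectorizer + TfidfTransformer pipeline and the combined TfidfVectorizer, along with the smoothed idf formula idf(t) = ln((1 + n) / (1 + df(t))) + 1. Below is a minimal usage sketch on a toy corpus (the corpus and variable names are illustrative, not part of the library source) that exercises that API and checks the documented formula against the fitted idf_ vector.

import numpy as np
from sklearn.feature_extraction.text import (CountVectorizer,
                                              TfidfTransformer,
                                              TfidfVectorizer)

corpus = ["the cat sat on the mat",
          "the dog sat on the log"]

# Two-step form: raw term counts, then tf-idf reweighting.
counts = CountVectorizer().fit_transform(corpus)
transformer = TfidfTransformer(smooth_idf=True, norm='l2').fit(counts)

# One-step equivalent, as described in the TfidfVectorizer docstring.
vec = TfidfVectorizer(smooth_idf=True, norm='l2').fit(corpus)

# Recompute the smoothed idf by hand and compare with the fitted idf_ vector.
n_docs = counts.shape[0]
df = np.bincount(counts.nonzero()[1], minlength=counts.shape[1])
expected_idf = np.log((1.0 + n_docs) / (1.0 + df)) + 1.0

print(np.allclose(transformer.idf_, expected_idf))   # True
print(vec.get_feature_names())                       # feature index -> term

On this corpus the terms that occur in both documents ("the", "sat", "on") end up with the minimum idf of exactly 1.0, which is the behaviour the "+1" in the formula is designed to guarantee: ubiquitous terms are down-weighted but never zeroed out.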
themrmax/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
73
2264
"""Build a language detector model The goal of this exercise is to train a linear classifier on text features that represent sequences of up to 3 consecutive characters so as to be recognize natural languages by using the frequencies of short character sequences as 'fingerprints'. """ # Author: Olivier Grisel <[email protected]> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import Perceptron from sklearn.pipeline import Pipeline from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics # The training data folder must be passed as first argument languages_data_folder = sys.argv[1] dataset = load_files(languages_data_folder) # Split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.5) # TASK: Build a vectorizer that splits strings into sequence of 1 to 3 # characters instead of word tokens vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False) # TASK: Build a vectorizer / classifier pipeline using the previous analyzer # the pipeline instance should stored in a variable named clf clf = Pipeline([ ('vec', vectorizer), ('clf', Perceptron()), ]) # TASK: Fit the pipeline on the training set clf.fit(docs_train, y_train) # TASK: Predict the outcome on the testing set in a variable named y_predicted y_predicted = clf.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) #import matlotlib.pyplot as plt #plt.matshow(cm, cmap=plt.cm.jet) #plt.show() # Predict the result on some short new sentences: sentences = [ u'This is a language detection test.', u'Ceci est un test de d\xe9tection de la langue.', u'Dies ist ein Test, um die Sprache zu erkennen.', ] predicted = clf.predict(sentences) for s, p in zip(sentences, predicted): print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
bsd-3-clause
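Since the exercise hinges on character n-grams rather than word tokens, a quick throwaway check (a hypothetical snippet, not part of the exercise file) of what the analyzer configured above actually extracts makes the 'fingerprint' idea concrete.

from sklearn.feature_extraction.text import TfidfVectorizer

# Same settings as the vectorizer used in the exercise above.
analyzer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                           use_idf=False).build_analyzer()

# All character 1-, 2- and 3-grams of the (lowercased) input string.
print(analyzer("Das"))   # typically: ['d', 'a', 's', 'da', 'as', 'das']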
mjgrav2001/scikit-learn
doc/sphinxext/gen_rst.py
142
40026
""" Example generation for the scikit learn Generate the rst files for the examples by iterating over the python example files. Files that generate images should start with 'plot' """ from __future__ import division, print_function from time import time import ast import os import re import shutil import traceback import glob import sys import gzip import posixpath import subprocess import warnings from sklearn.externals import six # Try Python 2 first, otherwise load from Python 3 try: from StringIO import StringIO import cPickle as pickle import urllib2 as urllib from urllib2 import HTTPError, URLError except ImportError: from io import StringIO import pickle import urllib.request import urllib.error import urllib.parse from urllib.error import HTTPError, URLError try: # Python 2 built-in execfile except NameError: def execfile(filename, global_vars=None, local_vars=None): with open(filename, encoding='utf-8') as f: code = compile(f.read(), filename, 'exec') exec(code, global_vars, local_vars) try: basestring except NameError: basestring = str import token import tokenize import numpy as np try: # make sure that the Agg backend is set before importing any # matplotlib import matplotlib matplotlib.use('Agg') except ImportError: # this script can be imported by nosetest to find tests to run: we should not # impose the matplotlib requirement in that case. pass from sklearn.externals import joblib ############################################################################### # A tee object to redict streams to multiple outputs class Tee(object): def __init__(self, file1, file2): self.file1 = file1 self.file2 = file2 def write(self, data): self.file1.write(data) self.file2.write(data) def flush(self): self.file1.flush() self.file2.flush() ############################################################################### # Documentation link resolver objects def _get_data(url): """Helper function to get data over http or from a local file""" if url.startswith('http://'): # Try Python 2, use Python 3 on exception try: resp = urllib.urlopen(url) encoding = resp.headers.dict.get('content-encoding', 'plain') except AttributeError: resp = urllib.request.urlopen(url) encoding = resp.headers.get('content-encoding', 'plain') data = resp.read() if encoding == 'plain': pass elif encoding == 'gzip': data = StringIO(data) data = gzip.GzipFile(fileobj=data).read() else: raise RuntimeError('unknown encoding') else: with open(url, 'r') as fid: data = fid.read() fid.close() return data mem = joblib.Memory(cachedir='_build') get_data = mem.cache(_get_data) def parse_sphinx_searchindex(searchindex): """Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index. 
""" def _select_block(str_in, start_tag, end_tag): """Select first block delimited by start_tag and end_tag""" start_pos = str_in.find(start_tag) if start_pos < 0: raise ValueError('start_tag not found') depth = 0 for pos in range(start_pos, len(str_in)): if str_in[pos] == start_tag: depth += 1 elif str_in[pos] == end_tag: depth -= 1 if depth == 0: break sel = str_in[start_pos + 1:pos] return sel def _parse_dict_recursive(dict_str): """Parse a dictionary from the search index""" dict_out = dict() pos_last = 0 pos = dict_str.find(':') while pos >= 0: key = dict_str[pos_last:pos] if dict_str[pos + 1] == '[': # value is a list pos_tmp = dict_str.find(']', pos + 1) if pos_tmp < 0: raise RuntimeError('error when parsing dict') value = dict_str[pos + 2: pos_tmp].split(',') # try to convert elements to int for i in range(len(value)): try: value[i] = int(value[i]) except ValueError: pass elif dict_str[pos + 1] == '{': # value is another dictionary subdict_str = _select_block(dict_str[pos:], '{', '}') value = _parse_dict_recursive(subdict_str) pos_tmp = pos + len(subdict_str) else: raise ValueError('error when parsing dict: unknown elem') key = key.strip('"') if len(key) > 0: dict_out[key] = value pos_last = dict_str.find(',', pos_tmp) if pos_last < 0: break pos_last += 1 pos = dict_str.find(':', pos_last) return dict_out # Make sure searchindex uses UTF-8 encoding if hasattr(searchindex, 'decode'): searchindex = searchindex.decode('UTF-8') # parse objects query = 'objects:' pos = searchindex.find(query) if pos < 0: raise ValueError('"objects:" not found in search index') sel = _select_block(searchindex[pos:], '{', '}') objects = _parse_dict_recursive(sel) # parse filenames query = 'filenames:' pos = searchindex.find(query) if pos < 0: raise ValueError('"filenames:" not found in search index') filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find(']')] filenames = [f.strip('"') for f in filenames.split(',')] return filenames, objects class SphinxDocLinkResolver(object): """ Resolve documentation links using searchindex.js generated by Sphinx Parameters ---------- doc_url : str The base URL of the project website. searchindex : str Filename of searchindex, relative to doc_url. extra_modules_test : list of str List of extra module names to test. relative : bool Return relative links (only useful for links to documentation of this package). """ def __init__(self, doc_url, searchindex='searchindex.js', extra_modules_test=None, relative=False): self.doc_url = doc_url self.relative = relative self._link_cache = {} self.extra_modules_test = extra_modules_test self._page_cache = {} if doc_url.startswith('http://'): if relative: raise ValueError('Relative links are only supported for local ' 'URLs (doc_url cannot start with "http://)"') searchindex_url = doc_url + '/' + searchindex else: searchindex_url = os.path.join(doc_url, searchindex) # detect if we are using relative links on a Windows system if os.name.lower() == 'nt' and not doc_url.startswith('http://'): if not relative: raise ValueError('You have to use relative=True for the local' ' package on a Windows system.') self._is_windows = True else: self._is_windows = False # download and initialize the search index sindex = get_data(searchindex_url) filenames, objects = parse_sphinx_searchindex(sindex) self._searchindex = dict(filenames=filenames, objects=objects) def _get_link(self, cobj): """Get a valid link, False if not found""" fname_idx = None full_name = cobj['module_short'] + '.' 
+ cobj['name'] if full_name in self._searchindex['objects']: value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[next(iter(value.keys()))] fname_idx = value[0] elif cobj['module_short'] in self._searchindex['objects']: value = self._searchindex['objects'][cobj['module_short']] if cobj['name'] in value.keys(): fname_idx = value[cobj['name']][0] if fname_idx is not None: fname = self._searchindex['filenames'][fname_idx] + '.html' if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if hasattr(link, 'decode'): link = link.decode('utf-8', 'replace') if link in self._page_cache: html = self._page_cache[link] else: html = get_data(link) self._page_cache[link] = html # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] if self.extra_modules_test is not None: for mod in self.extra_modules_test: comb_names.append(mod + '.' + cobj['name']) url = False if hasattr(html, 'decode'): # Decode bytes under Python 3 html = html.decode('utf-8', 'replace') for comb_name in comb_names: if hasattr(comb_name, 'decode'): # Decode bytes under Python 3 comb_name = comb_name.decode('utf-8', 'replace') if comb_name in html: url = link + u'#' + comb_name link = url else: link = False return link def resolve(self, cobj, this_url): """Resolve the link to the documentation, returns None if not found Parameters ---------- cobj : dict Dict with information about the "code object" for which we are resolving a link. cobi['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str URL of the current page. Needed to construct relative URLs (only used if relative=True in constructor). Returns ------- link : str | None The link (URL) to the documentation. """ full_name = cobj['module_short'] + '.' + cobj['name'] link = self._link_cache.get(full_name, None) if link is None: # we don't have it cached link = self._get_link(cobj) # cache it for the future self._link_cache[full_name] = link if link is False or link is None: # failed to resolve return None if self.relative: link = os.path.relpath(link, start=this_url) if self._is_windows: # replace '\' with '/' so it on the web link = link.replace('\\', '/') # for some reason, the relative link goes one directory too high up link = link[3:] return link ############################################################################### rst_template = """ .. _example_%(short_fname)s: %(docstring)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- """ plot_rst_template = """ .. _example_%(short_fname)s: %(docstring)s %(image_list)s %(stdout)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- **Total running time of the example:** %(time_elapsed) .2f seconds (%(time_m) .0f minutes %(time_s) .2f seconds) """ # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. HLIST_HEADER = """ .. rst-class:: horizontal """ HLIST_IMAGE_TEMPLATE = """ * .. image:: images/%s :scale: 47 """ SINGLE_IMAGE = """ .. image:: images/%s :align: center """ # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. 
# key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600), 'plot_outlier_detection_001.png': (3, 372), 'plot_gp_regression_001.png': (2, 250), 'plot_adaboost_twoclass_001.png': (1, 372), 'plot_compare_methods_001.png': (1, 349)} def extract_docstring(filename, ignore_heading=False): """ Extract a module-level docstring, if any """ if six.PY2: lines = open(filename).readlines() else: lines = open(filename, encoding='utf-8').readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) for tok_type, tok_content, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif tok_type == 'STRING': docstring = eval(tok_content) # If the docstring is formatted with several paragraphs, extract # the first one: paragraphs = '\n'.join( line.rstrip() for line in docstring.split('\n')).split('\n\n') if paragraphs: if ignore_heading: if len(paragraphs) > 1: first_par = re.sub('\n', ' ', paragraphs[1]) first_par = ((first_par[:95] + '...') if len(first_par) > 95 else first_par) else: raise ValueError("Docstring not found by gallery.\n" "Please check the layout of your" " example file:\n {}\n and make sure" " it's correct".format(filename)) else: first_par = paragraphs[0] break return docstring, first_par, erow + 1 + start_row def generate_example_rst(app): """ Generate the list of examples, as well as the contents of examples. """ root_dir = os.path.join(app.builder.srcdir, 'auto_examples') example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..', 'examples')) generated_dir = os.path.abspath(os.path.join(app.builder.srcdir, 'modules', 'generated')) try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) if not os.path.exists(example_dir): os.makedirs(example_dir) if not os.path.exists(root_dir): os.makedirs(root_dir) if not os.path.exists(generated_dir): os.makedirs(generated_dir) # we create an index.rst with all examples fhindex = open(os.path.join(root_dir, 'index.rst'), 'w') # Note: The sidebar button has been removed from the examples page for now # due to how it messes up the layout. Will be fixed at a later point fhindex.write("""\ .. raw:: html <style type="text/css"> div#sidebarbutton { /* hide the sidebar collapser, while ensuring vertical arrangement */ display: none; } </style> .. _examples-index: Examples ======== """) # Here we don't use an os.walk, but we recurse only twice: flat is # better than nested. 
seen_backrefs = set() generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) for directory in sorted(os.listdir(example_dir)): if os.path.isdir(os.path.join(example_dir, directory)): generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs) fhindex.flush() def extract_line_count(filename, target_dir): # Extract the line count of a file example_file = os.path.join(target_dir, filename) if six.PY2: lines = open(example_file).readlines() else: lines = open(example_file, encoding='utf-8').readlines() start_row = 0 if lines and lines[0].startswith('#!'): lines.pop(0) start_row = 1 line_iterator = iter(lines) tokens = tokenize.generate_tokens(lambda: next(line_iterator)) check_docstring = True erow_docstring = 0 for tok_type, _, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif (tok_type == 'STRING') and check_docstring: erow_docstring = erow check_docstring = False return erow_docstring+1+start_row, erow+1+start_row def line_count_sort(file_list, target_dir): # Sort the list of examples by line-count new_list = [x for x in file_list if x.endswith('.py')] unsorted = np.zeros(shape=(len(new_list), 2)) unsorted = unsorted.astype(np.object) for count, exmpl in enumerate(new_list): docstr_lines, total_lines = extract_line_count(exmpl, target_dir) unsorted[count][1] = total_lines - docstr_lines unsorted[count][0] = exmpl index = np.lexsort((unsorted[:, 0].astype(np.str), unsorted[:, 1].astype(np.float))) if not len(unsorted): return [] return np.array(unsorted[index][:, 0]).tolist() def _thumbnail_div(subdir, full_dir, fname, snippet): """Generates RST to place a thumbnail in a gallery""" thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png') link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_') ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_') if ref_name.startswith('._'): ref_name = ref_name[2:] out = [] out.append(""" .. raw:: html <div class="thumbnailContainer" tooltip="{}"> """.format(snippet)) out.append('.. figure:: %s\n' % thumb) if link_name.startswith('._'): link_name = link_name[2:] if full_dir != '.': out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3])) else: out.append(' :target: ./%s.html\n\n' % link_name[:-3]) out.append(""" :ref:`example_%s` .. raw:: html </div> """ % (ref_name)) return ''.join(out) def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs): """ Generate the rst file for an example directory. """ if not directory == '.': target_dir = os.path.join(root_dir, directory) src_dir = os.path.join(example_dir, directory) else: target_dir = root_dir src_dir = example_dir if not os.path.exists(os.path.join(src_dir, 'README.txt')): raise ValueError('Example directory %s does not have a README.txt' % src_dir) fhindex.write(""" %s """ % open(os.path.join(src_dir, 'README.txt')).read()) if not os.path.exists(target_dir): os.makedirs(target_dir) sorted_listdir = line_count_sort(os.listdir(src_dir), src_dir) if not os.path.exists(os.path.join(directory, 'images', 'thumb')): os.makedirs(os.path.join(directory, 'images', 'thumb')) for fname in sorted_listdir: if fname.endswith('py'): backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery) new_fname = os.path.join(src_dir, fname) _, snippet, _ = extract_docstring(new_fname, True) fhindex.write(_thumbnail_div(directory, directory, fname, snippet)) fhindex.write(""" .. 
toctree:: :hidden: %s/%s """ % (directory, fname[:-3])) for backref in backrefs: include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref) seen = backref in seen_backrefs with open(include_path, 'a' if seen else 'w') as ex_file: if not seen: # heading print(file=ex_file) print('Examples using ``%s``' % backref, file=ex_file) print('-----------------%s--' % ('-' * len(backref)), file=ex_file) print(file=ex_file) rel_dir = os.path.join('../../auto_examples', directory) ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet)) seen_backrefs.add(backref) fhindex.write(""" .. raw:: html <div class="clearer"></div> """) # clear at the end of the section # modules for which we embed links into example code DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy'] def make_thumbnail(in_fname, out_fname, width, height): """Make a thumbnail with the same aspect ratio centered in an image with a given width and height """ # local import to avoid testing dependency on PIL: try: from PIL import Image except ImportError: import Image img = Image.open(in_fname) width_in, height_in = img.size scale_w = width / float(width_in) scale_h = height / float(height_in) if height_in * scale_w <= height: scale = scale_w else: scale = scale_h width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) # insert centered thumb = Image.new('RGB', (width, height), (255, 255, 255)) pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2) thumb.paste(img, pos_insert) thumb.save(out_fname) # Use optipng to perform lossless compression on the resized image if # software is installed if os.environ.get('SKLEARN_DOC_OPTIPNG', False): try: subprocess.call(["optipng", "-quiet", "-o", "9", out_fname]) except Exception: warnings.warn('Install optipng to reduce the size of the generated images') def get_short_module_name(module_name, obj_name): """ Get the shortest possible module name """ parts = module_name.split('.') short_name = module_name for i in range(len(parts) - 1, 0, -1): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) except ImportError: # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break return short_name class NameFinder(ast.NodeVisitor): """Finds the longest form of variable names and their imports in code Only retains names from imported modules. """ def __init__(self): super(NameFinder, self).__init__() self.imported_names = {} self.accessed_names = set() def visit_Import(self, node, prefix=''): for alias in node.names: local_name = alias.asname or alias.name self.imported_names[local_name] = prefix + alias.name def visit_ImportFrom(self, node): self.visit_Import(node, node.module + '.') def visit_Name(self, node): self.accessed_names.add(node.id) def visit_Attribute(self, node): attrs = [] while isinstance(node, ast.Attribute): attrs.append(node.attr) node = node.value if isinstance(node, ast.Name): # This is a.b, not e.g. 
a().b attrs.append(node.id) self.accessed_names.add('.'.join(reversed(attrs))) else: # need to get a in a().b self.visit(node) def get_mapping(self): for name in self.accessed_names: local_name = name.split('.', 1)[0] remainder = name[len(local_name):] if local_name in self.imported_names: # Join import path to relative path full_name = self.imported_names[local_name] + remainder yield name, full_name def identify_names(code): """Builds a codeobj summary by identifying and resovles used names >>> code = ''' ... from a.b import c ... import d as e ... print(c) ... e.HelloWorld().f.g ... ''' >>> for name, o in sorted(identify_names(code).items()): ... print(name, o['name'], o['module'], o['module_short']) c c a.b a.b e.HelloWorld HelloWorld d d """ finder = NameFinder() finder.visit(ast.parse(code)) example_code_obj = {} for name, full_name in finder.get_mapping(): # name is as written in file (e.g. np.asarray) # full_name includes resolved import path (e.g. numpy.asarray) module, attribute = full_name.rsplit('.', 1) # get shortened module name module_short = get_short_module_name(module, attribute) cobj = {'name': attribute, 'module': module, 'module_short': module_short} example_code_obj[name] = cobj return example_code_obj def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery): """ Generate the rst file for a given example. Returns the set of sklearn functions/classes imported in the example. """ base_image_name = os.path.splitext(fname)[0] image_fname = '%s_%%03d.png' % base_image_name this_template = rst_template last_dir = os.path.split(src_dir)[-1] # to avoid leading . in file names, and wrong names in links if last_dir == '.' or last_dir == 'examples': last_dir = '' else: last_dir += '_' short_fname = last_dir + fname src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) # The following is a list containing all the figure names figure_list = [] image_dir = os.path.join(target_dir, 'images') thumb_dir = os.path.join(image_dir, 'thumb') if not os.path.exists(image_dir): os.makedirs(image_dir) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) image_path = os.path.join(image_dir, image_fname) stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name) time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name) thumb_file = os.path.join(thumb_dir, base_image_name + '.png') time_elapsed = 0 if plot_gallery and fname.startswith('plot'): # generate the plot as png image if file name # starts with plot and if it is more recent than an # existing image. 
first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if not os.path.exists(first_image_file) or \ os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime: # We need to execute the code print('plotting %s' % fname) t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip().expandtabs() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers() for fig_mngr in fig_managers: # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. fig = plt.figure(fig_mngr.num) kwargs = {} to_rgba = matplotlib.colors.colorConverter.to_rgba for attr in ['facecolor', 'edgecolor']: fig_attr = getattr(fig, 'get_' + attr)() default_attr = matplotlib.rcParams['figure.' + attr] if to_rgba(fig_attr) != to_rgba(default_attr): kwargs[attr] = fig_attr fig.savefig(image_path % fig_mngr.num, **kwargs) figure_list.append(image_fname % fig_mngr.num) except: print(80 * '_') print('%s is not compiling:' % fname) traceback.print_exc() print(80 * '_') finally: os.chdir(cwd) sys.stdout = orig_stdout print(" - time elapsed : %.2g sec" % time_elapsed) else: figure_list = [f[len(image_dir):] for f in glob.glob(image_path.replace("%03d", '[0-9][0-9][0-9]'))] figure_list.sort() # generate thumb file this_template = plot_rst_template car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/') # Note: normaly, make_thumbnail is used to write to the path contained in `thumb_file` # which is within `auto_examples/../images/thumbs` depending on the example. # Because the carousel has different dimensions than those of the examples gallery, # I did not simply reuse them all as some contained whitespace due to their default gallery # thumbnail size. Below, for a few cases, seperate thumbnails are created (the originals can't # just be overwritten with the carousel dimensions as it messes up the examples gallery layout). # The special carousel thumbnails are written directly to _build/html/stable/_images/, # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to # have it happen with the rest. 
Ideally the should be written to 'thumb_file' as well, and then # copied to the _images folder during the `Copying Downloadable Files` step like the rest. if not os.path.exists(car_thumb_path): os.makedirs(car_thumb_path) if os.path.exists(first_image_file): # We generate extra special thumbnails for the carousel carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png') first_img = image_fname % 1 if first_img in carousel_thumbs: make_thumbnail((image_path % carousel_thumbs[first_img][0]), carousel_tfile, carousel_thumbs[first_img][1], 190) make_thumbnail(first_image_file, thumb_file, 400, 280) if not os.path.exists(thumb_file): # create something to replace the thumbnail make_thumbnail('images/no_image.png', thumb_file, 200, 140) docstring, short_desc, end_row = extract_docstring(example_file) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') time_m, time_s = divmod(time_elapsed, 60) f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w') f.write(this_template % locals()) f.flush() # save variables so we can later add links to the documentation if six.PY2: example_code_obj = identify_names(open(example_file).read()) else: example_code_obj = \ identify_names(open(example_file, encoding='utf-8').read()) if example_code_obj: codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') as fid: pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL) backrefs = set('{module_short}.{name}'.format(**entry) for entry in example_code_obj.values() if entry['module'].startswith('sklearn')) return backrefs def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" if exception is not None: return print('Embedding documentation hyperlinks in examples..') if app.builder.name == 'latex': # Don't embed hyperlinks when a latex builder is used. 
return # Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) resolver_urls = { 'matplotlib': 'http://matplotlib.org', 'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0', 'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference', } for this_module, url in resolver_urls.items(): try: doc_resolvers[this_module] = SphinxDocLinkResolver(url) except HTTPError as e: print("The following HTTP Error has occurred:\n") print(e.code) except URLError as e: print("\n...\n" "Warning: Embedding the documentation hyperlinks requires " "internet access.\nPlease check your network connection.\n" "Unable to continue embedding `{0}` links due to a URL " "Error:\n".format(this_module)) print(e.args) example_dir = os.path.join(app.builder.srcdir, 'auto_examples') html_example_dir = os.path.abspath(os.path.join(app.builder.outdir, 'auto_examples')) # patterns for replacement link_pattern = '<a href="%s">%s</a>' orig_pattern = '<span class="n">%s</span>' period = '<span class="o">.</span>' for dirpath, _, filenames in os.walk(html_example_dir): for fname in filenames: print('\tprocessing: %s' % fname) full_fname = os.path.join(html_example_dir, dirpath, fname) subpath = dirpath[len(html_example_dir) + 1:] pickle_fname = os.path.join(example_dir, subpath, fname[:-5] + '_codeobj.pickle') if os.path.exists(pickle_fname): # we have a pickle file with the objects to embed links for with open(pickle_fname, 'rb') as fid: example_code_obj = pickle.load(fid) fid.close() str_repl = {} # generate replacement strings with the links for name, cobj in example_code_obj.items(): this_module = cobj['module'].split('.')[0] if this_module not in doc_resolvers: continue try: link = doc_resolvers[this_module].resolve(cobj, full_fname) except (HTTPError, URLError) as e: print("The following error has occurred:\n") print(repr(e)) continue if link is not None: parts = name.split('.') name_html = period.join(orig_pattern % part for part in parts) str_repl[name_html] = link_pattern % (link, name_html) # do the replacement in the html file # ensure greediness names = sorted(str_repl, key=len, reverse=True) expr = re.compile(r'(?<!\.)\b' + # don't follow . or word '|'.join(re.escape(name) for name in names)) def substitute_link(match): return str_repl[match.group()] if len(str_repl) > 0: with open(full_fname, 'rb') as fid: lines_in = fid.readlines() with open(full_fname, 'wb') as fid: for line in lines_in: line = line.decode('utf-8') line = expr.sub(substitute_link, line) fid.write(line.encode('utf-8')) print('[done]') def setup(app): app.connect('builder-inited', generate_example_rst) app.add_config_value('plot_gallery', True, 'html') # embed links after build is finished app.connect('build-finished', embed_code_links) # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The problem is, # the directory is never cleared. This means that each time you build # the docs, the number of images in the directory grows. # # This question has been asked on the sphinx development list, but there # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the # image build directory each time the docs are built. If sphinx # changes their layout between versions, this will not work (though # it should probably not cause a crash). 
Tested successfully # on Sphinx 1.0.7 build_image_dir = '_build/html/_images' if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) for filename in filelist: if filename.endswith('png'): os.remove(os.path.join(build_image_dir, filename)) def setup_module(): # HACK: Stop nosetests running setup() above pass
bsd-3-clause
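gen_rst.py pulls each example's module docstring out with the tokenize module (see extract_docstring above) instead of importing the file, presumably so that slow or plot-heavy examples are not executed just to read their description. Here is a small self-contained sketch of that trick using made-up source text rather than a real example file.

import token
import tokenize
from io import StringIO

# A stand-in for the text of an example file; real examples are read from disk.
source = '"""My example title\n\nFirst paragraph used as the gallery snippet."""\nimport numpy as np\n'

line_iterator = iter(StringIO(source).readlines())
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
    if token.tok_name[tok_type] == 'STRING':
        docstring = eval(tok_content)             # the module docstring, as in extract_docstring
        print(docstring.split('\n\n')[0])         # -> 'My example title'
        print('code starts after line', erow)     # row where the docstring ends
        break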
ClimbsRocks/scikit-learn
examples/applications/plot_outlier_detection_housing.py
110
5681
""" ==================================== Outlier detection on a real data set ==================================== This example illustrates the need for robust covariance estimation on a real data set. It is useful both for outlier detection and for a better understanding of the data structure. We selected two sets of two variables from the Boston housing data set as an illustration of what kind of analysis can be done with several outlier detection tools. For the purpose of visualization, we are working with two-dimensional examples, but one should be aware that things are not so trivial in high-dimension, as it will be pointed out. In both examples below, the main result is that the empirical covariance estimate, as a non-robust one, is highly influenced by the heterogeneous structure of the observations. Although the robust covariance estimate is able to focus on the main mode of the data distribution, it sticks to the assumption that the data should be Gaussian distributed, yielding some biased estimation of the data structure, but yet accurate to some extent. The One-Class SVM does not assume any parametric form of the data distribution and can therefore model the complex shape of the data much better. First example ------------- The first example illustrates how robust covariance estimation can help concentrating on a relevant cluster when another one exists. Here, many observations are confounded into one and break down the empirical covariance estimation. Of course, some screening tools would have pointed out the presence of two clusters (Support Vector Machines, Gaussian Mixture Models, univariate outlier detection, ...). But had it been a high-dimensional example, none of these could be applied that easily. Second example -------------- The second example shows the ability of the Minimum Covariance Determinant robust estimator of covariance to concentrate on the main mode of the data distribution: the location seems to be well estimated, although the covariance is hard to estimate due to the banana-shaped distribution. Anyway, we can get rid of some outlying observations. The One-Class SVM is able to capture the real data structure, but the difficulty is to adjust its kernel bandwidth parameter so as to obtain a good compromise between the shape of the data scatter matrix and the risk of over-fitting the data. 
""" print(__doc__) # Author: Virgile Fritsch <[email protected]> # License: BSD 3 clause import numpy as np from sklearn.covariance import EllipticEnvelope from sklearn.svm import OneClassSVM import matplotlib.pyplot as plt import matplotlib.font_manager from sklearn.datasets import load_boston # Get data X1 = load_boston()['data'][:, [8, 10]] # two clusters X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped # Define "classifiers" to be used classifiers = { "Empirical Covariance": EllipticEnvelope(support_fraction=1., contamination=0.261), "Robust Covariance (Minimum Covariance Determinant)": EllipticEnvelope(contamination=0.261), "OCSVM": OneClassSVM(nu=0.261, gamma=0.05)} colors = ['m', 'g', 'b'] legend1 = {} legend2 = {} # Learn a frontier for outlier detection with several classifiers xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500)) xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500)) for i, (clf_name, clf) in enumerate(classifiers.items()): plt.figure(1) clf.fit(X1) Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()]) Z1 = Z1.reshape(xx1.shape) legend1[clf_name] = plt.contour( xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i]) plt.figure(2) clf.fit(X2) Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()]) Z2 = Z2.reshape(xx2.shape) legend2[clf_name] = plt.contour( xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i]) legend1_values_list = list(legend1.values()) legend1_keys_list = list(legend1.keys()) # Plot the results (= shape of the data points cloud) plt.figure(1) # two clusters plt.title("Outlier detection on a real data set (boston housing)") plt.scatter(X1[:, 0], X1[:, 1], color='black') bbox_args = dict(boxstyle="round", fc="0.8") arrow_args = dict(arrowstyle="->") plt.annotate("several confounded points", xy=(24, 19), xycoords="data", textcoords="data", xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args) plt.xlim((xx1.min(), xx1.max())) plt.ylim((yy1.min(), yy1.max())) plt.legend((legend1_values_list[0].collections[0], legend1_values_list[1].collections[0], legend1_values_list[2].collections[0]), (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]), loc="upper center", prop=matplotlib.font_manager.FontProperties(size=12)) plt.ylabel("accessibility to radial highways") plt.xlabel("pupil-teacher ratio by town") legend2_values_list = list(legend2.values()) legend2_keys_list = list(legend2.keys()) plt.figure(2) # "banana" shape plt.title("Outlier detection on a real data set (boston housing)") plt.scatter(X2[:, 0], X2[:, 1], color='black') plt.xlim((xx2.min(), xx2.max())) plt.ylim((yy2.min(), yy2.max())) plt.legend((legend2_values_list[0].collections[0], legend2_values_list[1].collections[0], legend2_values_list[2].collections[0]), (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]), loc="upper center", prop=matplotlib.font_manager.FontProperties(size=12)) plt.ylabel("% lower status of the population") plt.xlabel("average number of rooms per dwelling") plt.show()
bsd-3-clause
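The example above works by fitting each estimator and drawing the level-0 contour of decision_function as the outlier frontier. A stripped-down sketch of the same fit / decision_function pattern on synthetic data (not the Boston features used above, and with illustrative parameter values):

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(42)
X = rng.randn(200, 2)
X[:10] += 6                      # push a handful of points away from the main mode

clf = EllipticEnvelope(contamination=0.05)
clf.fit(X)

# As in the contour plots above, the zero level of decision_function is the
# outlier frontier: negative scores fall outside it.
scores = clf.decision_function(X)
print('flagged as outliers:', int((scores < 0).sum()))

# predict() gives an equivalent view as +1 (inlier) / -1 (outlier) labels.
print(int((clf.predict(X) == -1).sum()))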
lbishal/scikit-learn
examples/svm/plot_svm_kernels.py
329
1971
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= SVM-Kernels ========================================================= Three different types of SVM-Kernels are displayed below. The polynomial and RBF are especially useful when the data-points are not linearly separable. """ print(__doc__) # Code source: Gaël Varoquaux # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import svm # Our dataset and targets X = np.c_[(.4, -.7), (-1.5, -1), (-1.4, -.9), (-1.3, -1.2), (-1.1, -.2), (-1.2, -.4), (-.5, 1.2), (-1.5, 2.1), (1, 1), # -- (1.3, .8), (1.2, .5), (.2, -2), (.5, -2.4), (.2, -2.3), (0, -2.7), (1.3, 2.1)].T Y = [0] * 8 + [1] * 8 # figure number fignum = 1 # fit the model for kernel in ('linear', 'poly', 'rbf'): clf = svm.SVC(kernel=kernel, gamma=2) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors='none', zorder=10) plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired) plt.axis('tight') x_min = -3 x_max = 3 y_min = -3 y_max = 3 XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.figure(fignum, figsize=(4, 3)) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
bsd-3-clause
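The kernel comparison above is purely visual; the short non-plotting sketch below (toy points, illustrative only) fits the same three kernels and inspects the support vectors and a single prediction instead of drawing the decision surface.

import numpy as np
from sklearn import svm

# Eight toy points in two interleaved classes.
X = np.array([[0., 0.], [1., 1.], [2., 2.], [-1., -1.],
              [1., 0.], [0., 1.], [2., 0.], [0., 2.]])
Y = [0, 0, 0, 0, 1, 1, 1, 1]

for kernel in ('linear', 'poly', 'rbf'):
    clf = svm.SVC(kernel=kernel, gamma=2)
    clf.fit(X, Y)
    print(kernel,
          'support vectors per class:', clf.n_support_,
          'prediction for (0.9, 0.1):', clf.predict([[0.9, 0.1]]))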
harisbal/pandas
pandas/io/common.py
1
19676
"""Common IO api utilities""" import codecs from contextlib import closing, contextmanager import csv import mmap import os import zipfile import pandas.compat as compat from pandas.compat import BytesIO, StringIO, string_types, text_type from pandas.errors import ( # noqa DtypeWarning, EmptyDataError, ParserError, ParserWarning) from pandas.core.dtypes.common import is_file_like, is_number import pandas.core.common as com from pandas.io.formats.printing import pprint_thing # gh-12665: Alias for now and remove later. CParserError = ParserError # common NA values # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', _NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', ''} if compat.PY3: from urllib.request import urlopen, pathname2url _urlopen = urlopen from urllib.parse import urlparse as parse_url from urllib.parse import (uses_relative, uses_netloc, uses_params, urlencode, urljoin) from urllib.error import URLError from http.client import HTTPException # noqa else: from urllib2 import urlopen as _urlopen from urllib import urlencode, pathname2url # noqa from urlparse import urlparse as parse_url from urlparse import uses_relative, uses_netloc, uses_params, urljoin from urllib2 import URLError # noqa from httplib import HTTPException # noqa from contextlib import contextmanager, closing # noqa from functools import wraps # noqa # @wraps(_urlopen) @contextmanager def urlopen(*args, **kwargs): with closing(_urlopen(*args, **kwargs)) as f: yield f _VALID_URLS = set(uses_relative + uses_netloc + uses_params) _VALID_URLS.discard('') class BaseIterator(object): """Subclass this and provide a "__next__()" method to obtain an iterator. Useful only when the object being iterated is non-reusable (e.g. OK for a parser, not for an in-memory table, yes for its iterator).""" def __iter__(self): return self def __next__(self): raise com.AbstractMethodError(self) if not compat.PY3: BaseIterator.next = lambda self: self.__next__() def _is_url(url): """Check to see if a URL has a valid protocol. Parameters ---------- url : str or unicode Returns ------- isurl : bool If `url` has a valid protocol return True otherwise False. """ try: return parse_url(url).scheme in _VALID_URLS except Exception: return False def _expand_user(filepath_or_buffer): """Return the argument with an initial component of ~ or ~user replaced by that user's home directory. Parameters ---------- filepath_or_buffer : object to be converted if possible Returns ------- expanded_filepath_or_buffer : an expanded filepath or the input if not expandable """ if isinstance(filepath_or_buffer, string_types): return os.path.expanduser(filepath_or_buffer) return filepath_or_buffer def _validate_header_arg(header): if isinstance(header, bool): raise TypeError("Passing a bool to header is invalid. " "Use header=None for no header or " "header=int or list-like of ints to specify " "the row(s) making up the column names") def _stringify_path(filepath_or_buffer): """Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. For backwards compatibility with older pythons, pathlib.Path and py.path objects are specially coerced. 
Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ try: import pathlib _PATHLIB_INSTALLED = True except ImportError: _PATHLIB_INSTALLED = False try: from py.path import local as LocalPath _PY_PATH_INSTALLED = True except ImportError: _PY_PATH_INSTALLED = False if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path): return text_type(filepath_or_buffer) if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath): return filepath_or_buffer.strpath return filepath_or_buffer def is_s3_url(url): """Check for an s3, s3n, or s3a url""" try: return parse_url(url).scheme in ['s3', 's3n', 's3a'] except Exception: return False def is_gcs_url(url): """Check for a gcs url""" try: return parse_url(url).scheme in ['gcs', 'gs'] except Exception: return False def get_filepath_or_buffer(filepath_or_buffer, encoding=None, compression=None, mode=None): """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. Parameters ---------- filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), or buffer encoding : the encoding to use to decode py3 bytes, default is 'utf-8' mode : str, optional Returns ------- tuple of ({a filepath_ or buffer or S3File instance}, encoding, str, compression, str, should_close, bool) """ filepath_or_buffer = _stringify_path(filepath_or_buffer) if _is_url(filepath_or_buffer): req = _urlopen(filepath_or_buffer) content_encoding = req.headers.get('Content-Encoding', None) if content_encoding == 'gzip': # Override compression based on Content-Encoding header compression = 'gzip' reader = BytesIO(req.read()) req.close() return reader, encoding, compression, True if is_s3_url(filepath_or_buffer): from pandas.io import s3 return s3.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if is_gcs_url(filepath_or_buffer): from pandas.io import gcs return gcs.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if isinstance(filepath_or_buffer, (compat.string_types, compat.binary_type, mmap.mmap)): return _expand_user(filepath_or_buffer), None, compression, False if not is_file_like(filepath_or_buffer): msg = "Invalid file path or buffer object type: {_type}" raise ValueError(msg.format(_type=type(filepath_or_buffer))) return filepath_or_buffer, None, compression, False def file_path_to_url(path): """ converts an absolute native path to a FILE URL. Parameters ---------- path : a path in native format Returns ------- a valid FILE URL """ return urljoin('file:', pathname2url(path)) _compression_to_extension = { 'gzip': '.gz', 'bz2': '.bz2', 'zip': '.zip', 'xz': '.xz', } def _infer_compression(filepath_or_buffer, compression): """ Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : a path (str) or buffer compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). 
Returns ------- string or None : compression method Raises ------ ValueError on invalid compression specified """ # No compression has been explicitly specified if compression is None: return None # Infer compression if compression == 'infer': # Convert all path types (e.g. pathlib.Path) to strings filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, compat.string_types): # Cannot infer compression of a buffer, assume no compression return None # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): if filepath_or_buffer.endswith(extension): return compression return None # Compression has been specified. Check that it's valid if compression in _compression_to_extension: return compression msg = 'Unrecognized compression type: {}'.format(compression) valid = ['infer', None] + sorted(_compression_to_extension) msg += '\nValid compression types are {}'.format(valid) raise ValueError(msg) def _get_handle(path_or_buf, mode, encoding=None, compression=None, memory_map=False, is_text=True): """ Get file handle for given path/buffer and mode. Parameters ---------- path_or_buf : a path (str) or buffer mode : str mode to open path_or_buf with encoding : str or None compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). memory_map : boolean, default False See parsers._parser_params for more information. is_text : boolean, default True whether file/buffer is in text format (csv, json, etc.), or in binary mode (pickle, etc.) Returns ------- f : file-like A file-like object handles : list of file-like objects A list of file-like object that were opened in this function. """ try: from s3fs import S3File need_text_wrapping = (BytesIO, S3File) except ImportError: need_text_wrapping = (BytesIO,) handles = list() f = path_or_buf # Convert pathlib.Path/py.path.local or string path_or_buf = _stringify_path(path_or_buf) is_path = isinstance(path_or_buf, compat.string_types) if is_path: compression = _infer_compression(path_or_buf, compression) if compression: if compat.PY2 and not is_path and encoding: msg = 'compression with encoding is not yet supported in Python 2' raise ValueError(msg) # GZ Compression if compression == 'gzip': import gzip if is_path: f = gzip.open(path_or_buf, mode) else: f = gzip.GzipFile(fileobj=path_or_buf) # BZ Compression elif compression == 'bz2': import bz2 if is_path: f = bz2.BZ2File(path_or_buf, mode) elif compat.PY2: # Python 2's bz2 module can't take file objects, so have to # run through decompress manually f = StringIO(bz2.decompress(path_or_buf.read())) path_or_buf.close() else: f = bz2.BZ2File(path_or_buf) # ZIP Compression elif compression == 'zip': zf = BytesZipFile(path_or_buf, mode) # Ensure the container is closed as well. handles.append(zf) if zf.mode == 'w': f = zf elif zf.mode == 'r': zip_names = zf.namelist() if len(zip_names) == 1: f = zf.open(zip_names.pop()) elif len(zip_names) == 0: raise ValueError('Zero files found in ZIP file {}' .format(path_or_buf)) else: raise ValueError('Multiple files found in ZIP file.' 
' Only one file per ZIP: {}' .format(zip_names)) # XZ Compression elif compression == 'xz': lzma = compat.import_lzma() f = lzma.LZMAFile(path_or_buf, mode) # Unrecognized Compression else: msg = 'Unrecognized compression type: {}'.format(compression) raise ValueError(msg) handles.append(f) elif is_path: if compat.PY2: # Python 2 mode = "wb" if mode == "w" else mode f = open(path_or_buf, mode) elif encoding: # Python 3 and encoding f = open(path_or_buf, mode, encoding=encoding, newline="") elif is_text: # Python 3 and no explicit encoding f = open(path_or_buf, mode, errors='replace', newline="") else: # Python 3 and binary mode f = open(path_or_buf, mode) handles.append(f) # in Python 3, convert BytesIO or fileobjects passed with an encoding if (compat.PY3 and is_text and (compression or isinstance(f, need_text_wrapping))): from io import TextIOWrapper f = TextIOWrapper(f, encoding=encoding) handles.append(f) if memory_map and hasattr(f, 'fileno'): try: g = MMapWrapper(f) f.close() f = g except Exception: # we catch any errors that may have occurred # because that is consistent with the lower-level # functionality of the C engine (pd.read_csv), so # leave the file handler as is then pass return f, handles class BytesZipFile(zipfile.ZipFile, BytesIO): """ Wrapper for standard library class ZipFile and allow the returned file-like handle to accept byte strings via `write` method. BytesIO provides attributes of file-like object and ZipFile.writestr writes bytes strings into a member of the archive. """ # GH 17778 def __init__(self, file, mode, compression=zipfile.ZIP_DEFLATED, **kwargs): if mode in ['wb', 'rb']: mode = mode.replace('b', '') super(BytesZipFile, self).__init__(file, mode, compression, **kwargs) def write(self, data): super(BytesZipFile, self).writestr(self.filename, data) @property def closed(self): return self.fp is None class MMapWrapper(BaseIterator): """ Wrapper for the Python's mmap class so that it can be properly read in by Python's csv.reader class. Parameters ---------- f : file object File object to be mapped onto memory. 
Must support the 'fileno' method or have an equivalent attribute """ def __init__(self, f): self.mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) def __getattr__(self, name): return getattr(self.mmap, name) def __iter__(self): return self def __next__(self): newline = self.mmap.readline() # readline returns bytes, not str, in Python 3, # but Python's CSV reader expects str, so convert # the output to str before continuing if compat.PY3: newline = compat.bytes_to_str(newline) # mmap doesn't raise if reading past the allocated # data but instead returns an empty string, so raise # if that is returned if newline == '': raise StopIteration return newline if not compat.PY3: MMapWrapper.next = lambda self: self.__next__() class UTF8Recoder(BaseIterator): """ Iterator that reads an encoded stream and reencodes the input to UTF-8 """ def __init__(self, f, encoding): self.reader = codecs.getreader(encoding)(f) def read(self, bytes=-1): return self.reader.read(bytes).encode("utf-8") def readline(self): return self.reader.readline().encode("utf-8") def next(self): return next(self.reader).encode("utf-8") if compat.PY3: # pragma: no cover def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds): # ignore encoding return csv.reader(f, dialect=dialect, **kwds) def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds): return csv.writer(f, dialect=dialect, **kwds) else: class UnicodeReader(BaseIterator): """ A CSV reader which will iterate over lines in the CSV file "f", which is encoded in the given encoding. On Python 3, this is replaced (below) by csv.reader, which handles unicode. """ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): f = UTF8Recoder(f, encoding) self.reader = csv.reader(f, dialect=dialect, **kwds) def __next__(self): row = next(self.reader) return [compat.text_type(s, "utf-8") for s in row] class UnicodeWriter(object): """ A CSV writer which will write rows to CSV file "f", which is encoded in the given encoding. """ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds): # Redirect output to a queue self.queue = StringIO() self.writer = csv.writer(self.queue, dialect=dialect, **kwds) self.stream = f self.encoder = codecs.getincrementalencoder(encoding)() self.quoting = kwds.get("quoting", None) def writerow(self, row): def _check_as_is(x): return (self.quoting == csv.QUOTE_NONNUMERIC and is_number(x)) or isinstance(x, str) row = [x if _check_as_is(x) else pprint_thing(x).encode("utf-8") for x in row] self.writer.writerow([s for s in row]) # Fetch UTF-8 output from the queue ... data = self.queue.getvalue() data = data.decode("utf-8") # ... and re-encode it into the target encoding data = self.encoder.encode(data) # write to the target stream self.stream.write(data) # empty queue self.queue.truncate(0) def writerows(self, rows): def _check_as_is(x): return (self.quoting == csv.QUOTE_NONNUMERIC and is_number(x)) or isinstance(x, str) for i, row in enumerate(rows): rows[i] = [x if _check_as_is(x) else pprint_thing(x).encode("utf-8") for x in row] self.writer.writerows([[s for s in row] for row in rows]) # Fetch UTF-8 output from the queue ... data = self.queue.getvalue() data = data.decode("utf-8") # ... and re-encode it into the target encoding data = self.encoder.encode(data) # write to the target stream self.stream.write(data) # empty queue self.queue.truncate(0)
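A minimal usage sketch of the compression-inference helper defined above; the import path pandas.io.common is an assumption, since this excerpt does not show where the module lives:

from pandas.io.common import _infer_compression

# Extension-based inference applies only to path-like input.
print(_infer_compression('data.csv.gz', compression='infer'))  # 'gzip'
print(_infer_compression('data.csv', compression='infer'))     # None
# An explicitly named, supported method passes through unchanged.
print(_infer_compression('data.csv', compression='bz2'))       # 'bz2'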
bsd-3-clause
Obus/scikit-learn
benchmarks/bench_isotonic.py
268
3046
""" Benchmarks of isotonic regression performance. We generate a synthetic dataset of size 10^n, for n in [min, max], and examine the time taken to run isotonic regression over the dataset. The timings are then output to stdout, or visualized on a log-log scale with matplotlib. This alows the scaling of the algorithm with the problem size to be visualized and understood. """ from __future__ import print_function import numpy as np import gc from datetime import datetime from sklearn.isotonic import isotonic_regression from sklearn.utils.bench import total_seconds import matplotlib.pyplot as plt import argparse def generate_perturbed_logarithm_dataset(size): return np.random.randint(-50, 50, size=n) \ + 50. * np.log(1 + np.arange(n)) def generate_logistic_dataset(size): X = np.sort(np.random.normal(size=size)) return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X)) DATASET_GENERATORS = { 'perturbed_logarithm': generate_perturbed_logarithm_dataset, 'logistic': generate_logistic_dataset } def bench_isotonic_regression(Y): """ Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds). """ gc.collect() tstart = datetime.now() isotonic_regression(Y) delta = datetime.now() - tstart return total_seconds(delta) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Isotonic Regression benchmark tool") parser.add_argument('--iterations', type=int, required=True, help="Number of iterations to average timings over " "for each problem size") parser.add_argument('--log_min_problem_size', type=int, required=True, help="Base 10 logarithm of the minimum problem size") parser.add_argument('--log_max_problem_size', type=int, required=True, help="Base 10 logarithm of the maximum problem size") parser.add_argument('--show_plot', action='store_true', help="Plot timing output with matplotlib") parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(), required=True) args = parser.parse_args() timings = [] for exponent in range(args.log_min_problem_size, args.log_max_problem_size): n = 10 ** exponent Y = DATASET_GENERATORS[args.dataset](n) time_per_iteration = \ [bench_isotonic_regression(Y) for i in range(args.iterations)] timing = (n, np.mean(time_per_iteration)) timings.append(timing) # If we're not plotting, dump the timing to stdout if not args.show_plot: print(n, np.mean(time_per_iteration)) if args.show_plot: plt.plot(*zip(*timings)) plt.title("Average time taken running isotonic regression") plt.xlabel('Number of observations') plt.ylabel('Time (s)') plt.axis('tight') plt.loglog() plt.show()
bsd-3-clause
mjgrav2001/scikit-learn
sklearn/linear_model/tests/test_logistic.py
105
26588
import numpy as np import scipy.sparse as sp from scipy import linalg, optimize, sparse from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_raise_message from sklearn.utils import ConvergenceWarning from sklearn.linear_model.logistic import ( LogisticRegression, logistic_regression_path, LogisticRegressionCV, _logistic_loss_and_grad, _logistic_grad_hess, _multinomial_grad_hess, _logistic_loss, ) from sklearn.cross_validation import StratifiedKFold from sklearn.datasets import load_iris, make_classification X = [[-1, 0], [0, 1], [1, 1]] X_sp = sp.csr_matrix(X) Y1 = [0, 1, 1] Y2 = [2, 1, 0] iris = load_iris() def check_predictions(clf, X, y): """Check that the model is able to fit the classification data""" n_samples = len(y) classes = np.unique(y) n_classes = classes.shape[0] predicted = clf.fit(X, y).predict(X) assert_array_equal(clf.classes_, classes) assert_equal(predicted.shape, (n_samples,)) assert_array_equal(predicted, y) probabilities = clf.predict_proba(X) assert_equal(probabilities.shape, (n_samples, n_classes)) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) assert_array_equal(probabilities.argmax(axis=1), y) def test_predict_2_classes(): # Simple sanity check on a 2 classes dataset # Make sure it predicts the correct result on simple datasets. check_predictions(LogisticRegression(random_state=0), X, Y1) check_predictions(LogisticRegression(random_state=0), X_sp, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X, Y1) check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X, Y1) check_predictions(LogisticRegression(fit_intercept=False, random_state=0), X_sp, Y1) def test_error(): # Test for appropriate exception on errors msg = "Penalty term must be positive" assert_raise_message(ValueError, msg, LogisticRegression(C=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LogisticRegression(C="test").fit, X, Y1) for LR in [LogisticRegression, LogisticRegressionCV]: msg = "Tolerance for stopping criteria must be positive" assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1) msg = "Maximum number of iteration must be positive" assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1) assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1) def test_predict_3_classes(): check_predictions(LogisticRegression(C=10), X, Y2) check_predictions(LogisticRegression(C=10), X_sp, Y2) def test_predict_iris(): # Test logistic regression with the iris dataset n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] # Test that both multinomial and OvR solvers handle # multiclass data correctly and give good accuracy # score (>0.95) for the training data. 
for clf in [LogisticRegression(C=len(iris.data)), LogisticRegression(C=len(iris.data), solver='lbfgs', multi_class='multinomial'), LogisticRegression(C=len(iris.data), solver='newton-cg', multi_class='multinomial')]: clf.fit(iris.data, target) assert_array_equal(np.unique(target), clf.classes_) pred = clf.predict(iris.data) assert_greater(np.mean(pred == target), .95) probabilities = clf.predict_proba(iris.data) assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples)) pred = iris.target_names[probabilities.argmax(axis=1)] assert_greater(np.mean(pred == target), .95) def test_multinomial_validation(): for solver in ['lbfgs', 'newton-cg']: lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial') assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1]) def test_check_solver_option(): X, y = iris.data, iris.target for LR in [LogisticRegression, LogisticRegressionCV]: msg = ("Logistic Regression supports only liblinear, newton-cg and" " lbfgs solvers, got wrong_name") lr = LR(solver="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) msg = "multi_class should be either multinomial or ovr, got wrong_name" lr = LR(solver='newton-cg', multi_class="wrong_name") assert_raise_message(ValueError, msg, lr.fit, X, y) # all solver except 'newton-cg' and 'lfbgs' for solver in ['liblinear']: msg = ("Solver %s does not support a multinomial backend." % solver) lr = LR(solver=solver, multi_class='multinomial') assert_raise_message(ValueError, msg, lr.fit, X, y) # all solvers except 'liblinear' for solver in ['newton-cg', 'lbfgs']: msg = ("Solver %s supports only l2 penalties, got l1 penalty." % solver) lr = LR(solver=solver, penalty='l1') assert_raise_message(ValueError, msg, lr.fit, X, y) msg = ("Solver %s supports only dual=False, got dual=True" % solver) lr = LR(solver=solver, dual=True) assert_raise_message(ValueError, msg, lr.fit, X, y) def test_multinomial_binary(): # Test multinomial LR on a binary problem. target = (iris.target > 0).astype(np.intp) target = np.array(["setosa", "not-setosa"])[target] for solver in ['lbfgs', 'newton-cg']: clf = LogisticRegression(solver=solver, multi_class='multinomial') clf.fit(iris.data, target) assert_equal(clf.coef_.shape, (1, iris.data.shape[1])) assert_equal(clf.intercept_.shape, (1,)) assert_array_equal(clf.predict(iris.data), target) mlr = LogisticRegression(solver=solver, multi_class='multinomial', fit_intercept=False) mlr.fit(iris.data, target) pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data), axis=1)] assert_greater(np.mean(pred == target), .9) def test_sparsify(): # Test sparsify and densify members. 
n_samples, n_features = iris.data.shape target = iris.target_names[iris.target] clf = LogisticRegression(random_state=0).fit(iris.data, target) pred_d_d = clf.decision_function(iris.data) clf.sparsify() assert_true(sp.issparse(clf.coef_)) pred_s_d = clf.decision_function(iris.data) sp_data = sp.coo_matrix(iris.data) pred_s_s = clf.decision_function(sp_data) clf.densify() pred_d_s = clf.decision_function(sp_data) assert_array_almost_equal(pred_d_d, pred_s_d) assert_array_almost_equal(pred_d_d, pred_s_s) assert_array_almost_equal(pred_d_d, pred_d_s) def test_inconsistent_input(): # Test that an exception is raised on inconsistent input rng = np.random.RandomState(0) X_ = rng.random_sample((5, 10)) y_ = np.ones(X_.shape[0]) y_[0] = 0 clf = LogisticRegression(random_state=0) # Wrong dimensions for training data y_wrong = y_[:-1] assert_raises(ValueError, clf.fit, X, y_wrong) # Wrong dimensions for test data assert_raises(ValueError, clf.fit(X_, y_).predict, rng.random_sample((3, 12))) def test_write_parameters(): # Test that we can write to coef_ and intercept_ clf = LogisticRegression(random_state=0) clf.fit(X, Y1) clf.coef_[:] = 0 clf.intercept_[:] = 0 assert_array_almost_equal(clf.decision_function(X), 0) @raises(ValueError) def test_nan(): # Test proper NaN handling. # Regression test for Issue #252: fit used to go into an infinite loop. Xnan = np.array(X, dtype=np.float64) Xnan[0, 1] = np.nan LogisticRegression(random_state=0).fit(Xnan, Y1) def test_consistency_path(): # Test that the path algorithm is consistent rng = np.random.RandomState(0) X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2))) y = [1] * 100 + [-1] * 100 Cs = np.logspace(0, 4, 10) f = ignore_warnings # can't test with fit_intercept=True since LIBLINEAR # penalizes the intercept for method in ('lbfgs', 'newton-cg', 'liblinear'): coefs, Cs = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method) for i, C in enumerate(Cs): lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16) lr.fit(X, y) lr_coef = lr.coef_.ravel() assert_array_almost_equal(lr_coef, coefs[i], decimal=4) # test for fit_intercept=True for method in ('lbfgs', 'newton-cg', 'liblinear'): Cs = [1e3] coefs, Cs = f(logistic_regression_path)( X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method) lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4, intercept_scaling=10000) lr.fit(X, y) lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_]) assert_array_almost_equal(lr_coef, coefs[0], decimal=4) def test_liblinear_dual_random_state(): # random_state is relevant for liblinear solver only if dual=True X, y = make_classification(n_samples=20) lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr1.fit(X, y) lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15) lr2.fit(X, y) lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15) lr3.fit(X, y) # same result for same random state assert_array_almost_equal(lr1.coef_, lr2.coef_) # different results for different random states msg = "Arrays are not almost equal to 6 decimals" assert_raise_message(AssertionError, msg, assert_array_almost_equal, lr1.coef_, lr3.coef_) def test_logistic_loss_and_grad(): X_ref, y = make_classification(n_samples=20) n_features = X_ref.shape[1] X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = np.zeros(n_features) # First check that our derivation of the grad is correct loss, grad = _logistic_loss_and_grad(w, X, y, 
alpha=1.) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad, approx_grad, decimal=2) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad( w, X, y, alpha=1. ) assert_array_almost_equal(loss, loss_interp) approx_grad = optimize.approx_fprime( w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3 ) assert_array_almost_equal(grad_interp, approx_grad, decimal=2) def test_logistic_grad_hess(): rng = np.random.RandomState(0) n_samples, n_features = 50, 5 X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() X_sp = X_ref.copy() X_sp[X_sp < .1] = 0 X_sp = sp.csr_matrix(X_sp) for X in (X_ref, X_sp): w = .1 * np.ones(n_features) # First check that _logistic_grad_hess is consistent # with _logistic_loss_and_grad loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.) grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) assert_array_almost_equal(grad, grad_2) # Now check our hessian along the second direction of the grad vector = np.zeros_like(grad) vector[1] = 1 hess_col = hess(vector) # Computation of the Hessian is particularly fragile to numerical # errors when doing simple finite differences. Here we compute the # grad along a path in the direction of the vector and then use a # least-square regression to estimate the slope e = 1e-3 d_x = np.linspace(-e, e, 30) d_grad = np.array([ _logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1] for t in d_x ]) d_grad -= d_grad.mean(axis=0) approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() assert_array_almost_equal(approx_hess_col, hess_col, decimal=3) # Second check that our intercept implementation is good w = np.zeros(n_features + 1) loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.) loss_interp_2 = _logistic_loss(w, X, y, alpha=1.) grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.) 
assert_array_almost_equal(loss_interp, loss_interp_2) assert_array_almost_equal(grad_interp, grad_interp_2) def test_logistic_cv(): # test for LogisticRegressionCV object n_samples, n_features = 50, 5 rng = np.random.RandomState(0) X_ref = rng.randn(n_samples, n_features) y = np.sign(X_ref.dot(5 * rng.randn(n_features))) X_ref -= X_ref.mean() X_ref /= X_ref.std() lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False, solver='liblinear') lr_cv.fit(X_ref, y) lr = LogisticRegression(C=1., fit_intercept=False) lr.fit(X_ref, y) assert_array_almost_equal(lr.coef_, lr_cv.coef_) assert_array_equal(lr_cv.coef_.shape, (1, n_features)) assert_array_equal(lr_cv.classes_, [-1, 1]) assert_equal(len(lr_cv.classes_), 2) coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values())) assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features)) assert_array_equal(lr_cv.Cs_.shape, (1, )) scores = np.asarray(list(lr_cv.scores_.values())) assert_array_equal(scores.shape, (1, 3, 1)) def test_logistic_cv_sparse(): X, y = make_classification(n_samples=50, n_features=5, random_state=0) X[X < 1.0] = 0.0 csr = sp.csr_matrix(X) clf = LogisticRegressionCV(fit_intercept=True) clf.fit(X, y) clfs = LogisticRegressionCV(fit_intercept=True) clfs.fit(csr, y) assert_array_almost_equal(clfs.coef_, clf.coef_) assert_array_almost_equal(clfs.intercept_, clf.intercept_) assert_equal(clfs.C_, clf.C_) def test_intercept_logistic_helper(): n_samples, n_features = 10, 5 X, y = make_classification(n_samples=n_samples, n_features=n_features, random_state=0) # Fit intercept case. alpha = 1. w = np.ones(n_features + 1) grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha) loss_interp = _logistic_loss(w, X, y, alpha) # Do not fit intercept. This can be considered equivalent to adding # a feature vector of ones, i.e column of one vectors. X_ = np.hstack((X, np.ones(10)[:, np.newaxis])) grad, hess = _logistic_grad_hess(w, X_, y, alpha) loss = _logistic_loss(w, X_, y, alpha) # In the fit_intercept=False case, the feature vector of ones is # penalized. This should be taken care of. assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss) # Check gradient. assert_array_almost_equal(grad_interp[:n_features], grad[:n_features]) assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1]) rng = np.random.RandomState(0) grad = rng.rand(n_features + 1) hess_interp = hess_interp(grad) hess = hess(grad) assert_array_almost_equal(hess_interp[:n_features], hess[:n_features]) assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1]) def test_ovr_multinomial_iris(): # Test that OvR and multinomial are correct using the iris dataset. train, target = iris.data, iris.target n_samples, n_features = train.shape # Use pre-defined fold as folds generated for different y cv = StratifiedKFold(target, 3) clf = LogisticRegressionCV(cv=cv) clf.fit(train, target) clf1 = LogisticRegressionCV(cv=cv) target_copy = target.copy() target_copy[target_copy == 0] = 1 clf1.fit(train, target_copy) assert_array_almost_equal(clf.scores_[2], clf1.scores_[2]) assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_) assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_) # Test the shape of various attributes. 
assert_equal(clf.coef_.shape, (3, n_features)) assert_array_equal(clf.classes_, [0, 1, 2]) coefs_paths = np.asarray(list(clf.coefs_paths_.values())) assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1)) assert_equal(clf.Cs_.shape, (10, )) scores = np.asarray(list(clf.scores_.values())) assert_equal(scores.shape, (3, 3, 10)) # Test that for the iris data multinomial gives a better accuracy than OvR for solver in ['lbfgs', 'newton-cg']: clf_multi = LogisticRegressionCV( solver=solver, multi_class='multinomial', max_iter=15 ) clf_multi.fit(train, target) multi_score = clf_multi.score(train, target) ovr_score = clf.score(train, target) assert_greater(multi_score, ovr_score) # Test attributes of LogisticRegressionCV assert_equal(clf.coef_.shape, clf_multi.coef_.shape) assert_array_equal(clf_multi.classes_, [0, 1, 2]) coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values())) assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1)) assert_equal(clf_multi.Cs_.shape, (10, )) scores = np.asarray(list(clf_multi.scores_.values())) assert_equal(scores.shape, (3, 3, 10)) def test_logistic_regression_solvers(): X, y = make_classification(n_features=10, n_informative=5, random_state=0) clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False) clf_n.fit(X, y) clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False) clf_lbf.fit(X, y) clf_lib = LogisticRegression(fit_intercept=False) clf_lib.fit(X, y) assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3) assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3) assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3) def test_logistic_regression_solvers_multiclass(): X, y = make_classification(n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0) clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False) clf_n.fit(X, y) clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False) clf_lbf.fit(X, y) clf_lib = LogisticRegression(fit_intercept=False) clf_lib.fit(X, y) assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4) assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4) assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4) def test_logistic_regressioncv_class_weights(): X, y = make_classification(n_samples=20, n_features=20, n_informative=10, n_classes=3, random_state=0) # Test the liblinear fails when class_weight of type dict is # provided, when it is multiclass. However it can handle # binary problems. 
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2}, solver='liblinear') assert_raises(ValueError, clf_lib.fit, X, y) y_ = y.copy() y_[y == 2] = 1 clf_lib.fit(X, y_) assert_array_equal(clf_lib.classes_, [0, 1]) # Test for class_weight=balanced X, y = make_classification(n_samples=20, n_features=20, n_informative=10, random_state=0) clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False, class_weight='balanced') clf_lbf.fit(X, y) clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False, class_weight='balanced') clf_lib.fit(X, y) assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4) def test_logistic_regression_convergence_warnings(): # Test that warnings are raised if model does not converge X, y = make_classification(n_samples=20, n_features=20) clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1) assert_warns(ConvergenceWarning, clf_lib.fit, X, y) assert_equal(clf_lib.n_iter_, 2) def test_logistic_regression_multinomial(): # Tests for the multinomial option in logistic regression # Some basic attributes of Logistic Regression n_samples, n_features, n_classes = 50, 20, 3 X, y = make_classification(n_samples=n_samples, n_features=n_features, n_informative=10, n_classes=n_classes, random_state=0) clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial') clf_int.fit(X, y) assert_array_equal(clf_int.coef_.shape, (n_classes, n_features)) clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial', fit_intercept=False) clf_wint.fit(X, y) assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features)) # Similar tests for newton-cg solver option clf_ncg_int = LogisticRegression(solver='newton-cg', multi_class='multinomial') clf_ncg_int.fit(X, y) assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features)) clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False, multi_class='multinomial') clf_ncg_wint.fit(X, y) assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features)) # Compare solutions between lbfgs and newton-cg assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3) assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3) assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3) # Test that the path give almost the same results. However since in this # case we take the average of the coefs after fitting across all the # folds, it need not be exactly the same. 
for solver in ['lbfgs', 'newton-cg']: clf_path = LogisticRegressionCV(solver=solver, multi_class='multinomial', Cs=[1.]) clf_path.fit(X, y) assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3) assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3) def test_multinomial_grad_hess(): rng = np.random.RandomState(0) n_samples, n_features, n_classes = 100, 5, 3 X = rng.randn(n_samples, n_features) w = rng.rand(n_classes, n_features) Y = np.zeros((n_samples, n_classes)) ind = np.argmax(np.dot(X, w.T), axis=1) Y[range(0, n_samples), ind] = 1 w = w.ravel() sample_weights = np.ones(X.shape[0]) grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1., sample_weight=sample_weights) # extract first column of hessian matrix vec = np.zeros(n_features * n_classes) vec[0] = 1 hess_col = hessp(vec) # Estimate hessian using least squares as done in # test_logistic_grad_hess e = 1e-3 d_x = np.linspace(-e, e, 30) d_grad = np.array([ _multinomial_grad_hess(w + t * vec, X, Y, alpha=1., sample_weight=sample_weights)[0] for t in d_x ]) d_grad -= d_grad.mean(axis=0) approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel() assert_array_almost_equal(hess_col, approx_hess_col) def test_liblinear_decision_function_zero(): # Test negative prediction when decision_function values are zero. # Liblinear predicts the positive class when decision_function values # are zero. This is a test to verify that we do not do the same. # See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600 # and the PR https://github.com/scikit-learn/scikit-learn/pull/3623 X, y = make_classification(n_samples=5, n_features=5) clf = LogisticRegression(fit_intercept=False) clf.fit(X, y) # Dummy data such that the decision function becomes zero. X = np.zeros((5, 5)) assert_array_equal(clf.predict(X), np.zeros(5)) def test_liblinear_logregcv_sparse(): # Test LogRegCV with solver='liblinear' works for sparse matrices X, y = make_classification(n_samples=10, n_features=5) clf = LogisticRegressionCV(solver='liblinear') clf.fit(sparse.csr_matrix(X), y) def test_logreg_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: clf = LogisticRegression(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % clf.intercept_scaling) assert_raise_message(ValueError, msg, clf.fit, X, Y1) def test_logreg_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False clf = LogisticRegression(fit_intercept=False) clf.fit(X, Y1) assert_equal(clf.intercept_, 0.) def test_logreg_cv_penalty(): # Test that the correct penalty is passed to the final fit. X, y = make_classification(n_samples=50, n_features=20, random_state=0) lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear') lr_cv.fit(X, y) lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear') lr.fit(X, y) assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
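For reference, a compact sketch of the estimator flow these tests exercise, reusing the iris data loaded at the top of the file and a solver/multi_class pairing the tests treat as valid:

clf = LogisticRegression(C=1.0, solver='lbfgs', multi_class='multinomial')
clf.fit(iris.data, iris.target)
print(clf.score(iris.data, iris.target))              # training accuracy
print(clf.predict_proba(iris.data[:2]).sum(axis=1))   # each row sums to 1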
bsd-3-clause
Ichaelus/Github-Classifier
Application/Models/ClassificationModules/gbrtfilesandfolders.py
1
3372
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from Models.FeatureProcessing import *
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
import abc
from ClassificationModule import ClassificationModule


class gbrtfilesandfolders(ClassificationModule):
    """A Gradient Tree Boosting classifier using file types and folder names"""

    def __init__(self, file_corpus, foldername_corpus, n_estimators=150):
        description = "Gradient Tree Boosting / Gradient Boosted Regression Trees (GBRT)."
        ClassificationModule.__init__(self, "Files and Folders Gradient Tree Boosting", description)

        self.fileVectorizer = getTextVectorizer(50)
        self.foldernameVectorizer = getTextVectorizer(50)

        # Vectorizer for filetypes
        corpus = []
        for type in file_corpus:
            corpus.append(type)
        self.fileVectorizer.fit(corpus)

        # Vectorizer for foldernames
        corpus = []
        for folder in foldername_corpus:
            corpus.append(folder)
        self.foldernameVectorizer.fit(corpus)

        self.clf = GradientBoostingClassifier(n_estimators=n_estimators)
        print "\t-", self.name

    def resetAllTraining(self):
        """Reset classification module to status before training"""
        self.clf = sklearn.base.clone(self.clf)

    def trainOnSample(self, sample, nb_epoch=10, shuffle=True, verbose=True):
        """Train (incrementally) on a single sample. Optionally mix in a certain
        amount of old data so the model does not overfit to the new data."""
        readme_vec = self.formatInputData(sample)
        label_index = getLabelIndex(sample)
        return self.clf.fit(readme_vec, np.expand_dims(label_index, axis=0))

    def train(self, samples, nb_epoch=10, shuffle=True, verbose=True):
        """Train on a list of samples. Further parameters may be needed
        (nb_epoch, learning_rate, ...)."""
        train_samples = []
        train_lables = []
        for sample in samples:
            formatted_sample = self.formatInputData(sample)[0].tolist()
            train_samples.append(formatted_sample)
            train_lables.append(getLabelIndex(sample))
        train_lables = np.asarray(train_lables)
        train_result = self.clf.fit(train_samples, train_lables)
        self.isTrained = True
        return train_result

    def predictLabel(self, sample):
        """Return the label the classifier would assign to a given sample"""
        if not self.isTrained:
            return 0
        sample = self.formatInputData(sample)
        return self.clf.predict(sample)[0]

    def predictLabelAndProbability(self, sample):
        """Return the probability the module assigns to each label"""
        if not self.isTrained:
            return [0, 0, 0, 0, 0, 0, 0, 0]
        sample = self.formatInputData(sample)
        prediction = self.clf.predict_proba(sample)[0]
        return [np.argmax(prediction)] + list(prediction)

    def formatInputData(self, sample):
        """Extract file types and folder names and transform them to a feature vector"""
        sd = getReadme(sample)  # readme text is fetched but not used by this module
        arr = list(self.fileVectorizer.transform([getFiletypesString(sample)]).toarray()[0])
        arr += list(self.foldernameVectorizer.transform([getFoldernames(sample)]).toarray()[0])
        # Returns numpy array which contains 1 array with features
        return np.asarray([arr])
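A hedged sketch of the intended call pattern for the class above; file_corpus, foldername_corpus and samples are placeholders for data structures produced elsewhere in the application and are not defined here:

module = gbrtfilesandfolders(file_corpus, foldername_corpus, n_estimators=150)
module.train(samples)                                  # fit on the full sample list
print(module.predictLabelAndProbability(samples[0]))   # [predicted index] + class probabilities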
mit
jstoxrocky/statsmodels
statsmodels/sandbox/distributions/mv_measures.py
33
6257
'''using multivariate dependence and divergence measures The standard correlation coefficient measures only linear dependence between random variables. kendall's tau measures any monotonic relationship also non-linear. mutual information measures any kind of dependence, but does not distinguish between positive and negative relationship mutualinfo_kde and mutualinfo_binning follow Khan et al. 2007 Shiraj Khan, Sharba Bandyopadhyay, Auroop R. Ganguly, Sunil Saigal, David J. Erickson, III, Vladimir Protopopescu, and George Ostrouchov, Relative performance of mutual information estimation methods for quantifying the dependence among short and noisy data, Phys. Rev. E 76, 026209 (2007) http://pre.aps.org/abstract/PRE/v76/i2/e026209 ''' import numpy as np from scipy import stats from scipy.stats import gaussian_kde import statsmodels.sandbox.infotheo as infotheo def mutualinfo_kde(y, x, normed=True): '''mutual information of two random variables estimated with kde ''' nobs = len(x) if not len(y) == nobs: raise ValueError('both data arrays need to have the same size') x = np.asarray(x, float) y = np.asarray(y, float) yx = np.vstack((y,x)) kde_x = gaussian_kde(x)(x) kde_y = gaussian_kde(y)(y) kde_yx = gaussian_kde(yx)(yx) mi_obs = np.log(kde_yx) - np.log(kde_x) - np.log(kde_y) mi = mi_obs.sum() / nobs if normed: mi_normed = np.sqrt(1. - np.exp(-2 * mi)) return mi_normed else: return mi def mutualinfo_kde_2sample(y, x, normed=True): '''mutual information of two random variables estimated with kde ''' nobs = len(x) x = np.asarray(x, float) y = np.asarray(y, float) #yx = np.vstack((y,x)) kde_x = gaussian_kde(x.T)(x.T) kde_y = gaussian_kde(y.T)(x.T) #kde_yx = gaussian_kde(yx)(yx) mi_obs = np.log(kde_x) - np.log(kde_y) if len(mi_obs) != nobs: raise ValueError("Wrong number of observations") mi = mi_obs.mean() if normed: mi_normed = np.sqrt(1. - np.exp(-2 * mi)) return mi_normed else: return mi def mutualinfo_binned(y, x, bins, normed=True): '''mutual information of two random variables estimated with kde Notes ----- bins='auto' selects the number of bins so that approximately 5 observations are expected to be in each bin under the assumption of independence. This follows roughly the description in Kahn et al. 2007 ''' nobs = len(x) if not len(y) == nobs: raise ValueError('both data arrays need to have the same size') x = np.asarray(x, float) y = np.asarray(y, float) #yx = np.vstack((y,x)) ## fyx, binsy, binsx = np.histogram2d(y, x, bins=bins) ## fx, binsx_ = np.histogram(x, bins=binsx) ## fy, binsy_ = np.histogram(y, bins=binsy) if bins == 'auto': ys = np.sort(y) xs = np.sort(x) #quantiles = np.array([0,0.25, 0.4, 0.6, 0.75, 1]) qbin_sqr = np.sqrt(5./nobs) quantiles = np.linspace(0, 1, 1./qbin_sqr) quantile_index = ((nobs-1)*quantiles).astype(int) #move edges so that they don't coincide with an observation shift = 1e-6 + np.ones(quantiles.shape) shift[0] -= 2*1e-6 binsy = ys[quantile_index] + shift binsx = xs[quantile_index] + shift elif np.size(bins) == 1: binsy = bins binsx = bins elif (len(bins) == 2): binsy, binsx = bins ## if np.size(bins[0]) == 1: ## binsx = bins[0] ## if np.size(bins[1]) == 1: ## binsx = bins[1] fx, binsx = np.histogram(x, bins=binsx) fy, binsy = np.histogram(y, bins=binsy) fyx, binsy, binsx = np.histogram2d(y, x, bins=(binsy, binsx)) pyx = fyx * 1. / nobs px = fx * 1. / nobs py = fy * 1. / nobs mi_obs = pyx * (np.log(pyx+1e-10) - np.log(py)[:,None] - np.log(px)) mi = mi_obs.sum() if normed: mi_normed = np.sqrt(1. 
- np.exp(-2 * mi)) return mi_normed, (pyx, py, px, binsy, binsx), mi_obs else: return mi if __name__ == '__main__': import statsmodels.api as sm funtype = ['linear', 'quadratic'][1] nobs = 200 sig = 2#5. #x = np.linspace(-3, 3, nobs) + np.random.randn(nobs) x = np.sort(3*np.random.randn(nobs)) exog = sm.add_constant(x, prepend=True) #y = 0 + np.log(1+x**2) + sig * np.random.randn(nobs) if funtype == 'quadratic': y = 0 + x**2 + sig * np.random.randn(nobs) if funtype == 'linear': y = 0 + x + sig * np.random.randn(nobs) print('correlation') print(np.corrcoef(y,x)[0, 1]) print('pearsonr', stats.pearsonr(y,x)) print('spearmanr', stats.spearmanr(y,x)) print('kendalltau', stats.kendalltau(y,x)) pxy, binsx, binsy = np.histogram2d(x,y, bins=5) px, binsx_ = np.histogram(x, bins=binsx) py, binsy_ = np.histogram(y, bins=binsy) print('mutualinfo', infotheo.mutualinfo(px*1./nobs, py*1./nobs, 1e-15+pxy*1./nobs, logbase=np.e)) print('mutualinfo_kde normed', mutualinfo_kde(y,x)) print('mutualinfo_kde ', mutualinfo_kde(y,x, normed=False)) mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \ mutualinfo_binned(y, x, 5, normed=True) print('mutualinfo_binned normed', mi_normed) print('mutualinfo_binned ', mi_obs.sum()) mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \ mutualinfo_binned(y, x, 'auto', normed=True) print('auto') print('mutualinfo_binned normed', mi_normed) print('mutualinfo_binned ', mi_obs.sum()) ys = np.sort(y) xs = np.sort(x) by = ys[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)] bx = xs[((nobs-1)*np.array([0, 0.25, 0.4, 0.6, 0.75, 1])).astype(int)] mi_normed, (pyx2, py2, px2, binsy2, binsx2), mi_obs = \ mutualinfo_binned(y, x, (by,bx), normed=True) print('quantiles') print('mutualinfo_binned normed', mi_normed) print('mutualinfo_binned ', mi_obs.sum()) doplot = 1#False if doplot: import matplotlib.pyplot as plt plt.plot(x, y, 'o') olsres = sm.OLS(y, exog).fit() plt.plot(x, olsres.fittedvalues)
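A minimal, hedged call of the estimators defined above on synthetic, linearly related data, complementing the longer demonstration in the __main__ block:

rng = np.random.RandomState(0)
x = rng.randn(500)
y = x + 0.5 * rng.randn(500)
print(mutualinfo_kde(y, x))                        # normalized estimate in [0, 1]
mi_normed, _, mi_obs = mutualinfo_binned(y, x, 5)  # 5 equal-width bins per axis
print(mi_normed)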
bsd-3-clause
xccui/flink
flink-python/pyflink/table/serializers.py
13
3089
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import io from pyflink.serializers import Serializer from pyflink.table.utils import arrow_to_pandas, pandas_to_arrow class ArrowSerializer(Serializer): """ Serializes pandas.Series into Arrow streaming format data. """ def __init__(self, schema, row_type, timezone): super(ArrowSerializer, self).__init__() self._schema = schema self._field_types = row_type.field_types() self._timezone = timezone def __repr__(self): return "ArrowSerializer" def dump_to_stream(self, iterator, stream): writer = None try: for cols in iterator: batch = pandas_to_arrow(self._schema, self._timezone, self._field_types, cols) if writer is None: import pyarrow as pa writer = pa.RecordBatchStreamWriter(stream, batch.schema) writer.write_batch(batch) finally: if writer is not None: writer.close() def load_from_stream(self, stream): import pyarrow as pa reader = pa.ipc.open_stream(stream) for batch in reader: yield arrow_to_pandas(self._timezone, self._field_types, [batch]) def load_from_iterator(self, itor): class IteratorIO(io.RawIOBase): def __init__(self, itor): super(IteratorIO, self).__init__() self.itor = itor self.leftover = None def readable(self): return True def readinto(self, b): output_buffer_len = len(b) input = self.leftover or (self.itor.next() if self.itor.hasNext() else None) if input is None: return 0 output, self.leftover = input[:output_buffer_len], input[output_buffer_len:] b[:len(output)] = output return len(output) import pyarrow as pa reader = pa.ipc.open_stream( io.BufferedReader(IteratorIO(itor), buffer_size=io.DEFAULT_BUFFER_SIZE)) for batch in reader: yield batch
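A hedged round-trip sketch of the serializer above; schema, row_type and timezone are assumed to have been obtained from the surrounding pyflink machinery (they are not constructed in this file), and cols stands for one batch given as a list of pandas.Series:

import io

serializer = ArrowSerializer(schema, row_type, timezone)
buf = io.BytesIO()
serializer.dump_to_stream(iter([cols]), buf)    # write one Arrow record batch
buf.seek(0)
for frame in serializer.load_from_stream(buf):  # read it back as pandas data
    print(frame)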
apache-2.0
anntzer/scikit-learn
sklearn/cluster/tests/test_mean_shift.py
5
6724
""" Testing for mean shift clustering methods """ import numpy as np import warnings import pytest from scipy import sparse from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.cluster import MeanShift from sklearn.cluster import mean_shift from sklearn.cluster import estimate_bandwidth from sklearn.cluster import get_bin_seeds from sklearn.datasets import make_blobs from sklearn.metrics import v_measure_score n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=300, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=11) def test_estimate_bandwidth(): # Test estimate_bandwidth bandwidth = estimate_bandwidth(X, n_samples=200) assert 0.9 <= bandwidth <= 1.5 def test_estimate_bandwidth_1sample(): # Test estimate_bandwidth when n_samples=1 and quantile<1, so that # n_neighbors is set to 1. bandwidth = estimate_bandwidth(X, n_samples=1, quantile=0.3) assert bandwidth == pytest.approx(0., abs=1e-5) @pytest.mark.parametrize("bandwidth, cluster_all, expected, " "first_cluster_label", [(1.2, True, 3, 0), (1.2, False, 4, -1)]) def test_mean_shift(bandwidth, cluster_all, expected, first_cluster_label): # Test MeanShift algorithm ms = MeanShift(bandwidth=bandwidth, cluster_all=cluster_all) labels = ms.fit(X).labels_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) assert n_clusters_ == expected assert labels_unique[0] == first_cluster_label cluster_centers, labels_mean_shift = mean_shift(X, cluster_all=cluster_all) labels_mean_shift_unique = np.unique(labels_mean_shift) n_clusters_mean_shift = len(labels_mean_shift_unique) assert n_clusters_mean_shift == expected assert labels_mean_shift_unique[0] == first_cluster_label def test_mean_shift_negative_bandwidth(): bandwidth = -1 ms = MeanShift(bandwidth=bandwidth) msg = (r"bandwidth needs to be greater than zero or None," r" got -1\.000000") with pytest.raises(ValueError, match=msg): ms.fit(X) def test_estimate_bandwidth_with_sparse_matrix(): # Test estimate_bandwidth with sparse matrix X = sparse.lil_matrix((1000, 1000)) msg = "A sparse matrix was passed, but dense data is required." with pytest.raises(TypeError, match=msg): estimate_bandwidth(X) def test_parallel(): centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=50, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=11) ms1 = MeanShift(n_jobs=2) ms1.fit(X) ms2 = MeanShift() ms2.fit(X) assert_array_almost_equal(ms1.cluster_centers_, ms2.cluster_centers_) assert_array_equal(ms1.labels_, ms2.labels_) def test_meanshift_predict(): # Test MeanShift.predict ms = MeanShift(bandwidth=1.2) labels = ms.fit_predict(X) labels2 = ms.predict(X) assert_array_equal(labels, labels2) def test_meanshift_all_orphans(): # init away from the data, crash with a sensible warning ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]]) msg = "No point was within bandwidth=0.1" with pytest.raises(ValueError, match=msg): ms.fit(X,) def test_unfitted(): # Non-regression: before fit, there should be not fitted attributes. 
ms = MeanShift() assert not hasattr(ms, "cluster_centers_") assert not hasattr(ms, "labels_") def test_cluster_intensity_tie(): X = np.array([[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]]) c1 = MeanShift(bandwidth=2).fit(X) X = np.array([[4, 7], [3, 5], [3, 6], [1, 1], [2, 1], [1, 0]]) c2 = MeanShift(bandwidth=2).fit(X) assert_array_equal(c1.labels_, [1, 1, 1, 0, 0, 0]) assert_array_equal(c2.labels_, [0, 0, 0, 1, 1, 1]) def test_bin_seeds(): # Test the bin seeding technique which can be used in the mean shift # algorithm # Data is just 6 points in the plane X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2], [2., 1.], [2.1, 1.1], [0., 0.]]) # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be # found ground_truth = {(1., 1.), (2., 1.), (0., 0.)} test_bins = get_bin_seeds(X, 1, 1) test_result = set(tuple(p) for p in test_bins) assert len(ground_truth.symmetric_difference(test_result)) == 0 # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be # found ground_truth = {(1., 1.), (2., 1.)} test_bins = get_bin_seeds(X, 1, 2) test_result = set(tuple(p) for p in test_bins) assert len(ground_truth.symmetric_difference(test_result)) == 0 # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found # we bail and use the whole data here. with warnings.catch_warnings(record=True): test_bins = get_bin_seeds(X, 0.01, 1) assert_array_almost_equal(test_bins, X) # tight clusters around [0, 0] and [1, 1], only get two bins X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]], cluster_std=0.1, random_state=0) test_bins = get_bin_seeds(X, 1) assert_array_equal(test_bins, [[0, 0], [1, 1]]) @pytest.mark.parametrize('max_iter', [1, 100]) def test_max_iter(max_iter): clusters1, _ = mean_shift(X, max_iter=max_iter) ms = MeanShift(max_iter=max_iter).fit(X) clusters2 = ms.cluster_centers_ assert ms.n_iter_ <= ms.max_iter assert len(clusters1) == len(clusters2) for c1, c2 in zip(clusters1, clusters2): assert np.allclose(c1, c2) def test_mean_shift_zero_bandwidth(): # Check that mean shift works when the estimated bandwidth is 0. X = np.array([1, 1, 1, 2, 2, 2, 3, 3]).reshape(-1, 1) # estimate_bandwidth with default args returns 0 on this dataset bandwidth = estimate_bandwidth(X) assert bandwidth == 0 # get_bin_seeds with a 0 bin_size should return the dataset itself assert get_bin_seeds(X, bin_size=bandwidth) is X # MeanShift with binning and a 0 estimated bandwidth should be equivalent # to no binning. ms_binning = MeanShift(bin_seeding=True, bandwidth=None).fit(X) ms_nobinning = MeanShift(bin_seeding=False).fit(X) expected_labels = np.array([0, 0, 0, 1, 1, 1, 2, 2]) assert v_measure_score(ms_binning.labels_, expected_labels) == 1 assert v_measure_score(ms_nobinning.labels_, expected_labels) == 1 assert_allclose(ms_binning.cluster_centers_, ms_nobinning.cluster_centers_)
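A minimal sketch of the estimator flow these tests exercise, reusing the blob data X generated at the top of the file:

bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=200)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
print(len(np.unique(ms.labels_)), "clusters, centers shape", ms.cluster_centers_.shape)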
bsd-3-clause
fierval/retina
DiabeticRetinopathy/Features/extract_features.py
1
2129
import numpy as np
import pandas as pd
# DarkBrightDetector is used below; it is assumed to be importable from
# kobra.dr next to Labels.
from kobra.dr import Labels, DarkBrightDetector
from kobra import TrainFiles
from kobra.tr_utils import prep_out_path, time_now_str
import os
from os import path
import mahotas as mh
import mahotas.labeled as mhl
import cv2
import time

preprocessed = '/kaggle/retina/train/labelled'
masks = '/kaggle/retina/train/masks'
orig = '/kaggle/retina/train/sample/split'
output = '/kaggle/retina/train/sample/features'

n_bins = 100
prep_out_path(output)

for i in range(0, 5):
    prefix = str(i)
    print "Starting extraction @ ", time_now_str()

    files = os.listdir(path.join(preprocessed, prefix))

    # intermediate output will be stored here
    # we will save all the files first then join them into one csv file
    df = pd.DataFrame(columns = range(n_bins * 2 + 1))
    j = 0

    for f in files:
        start = time.time()
        im_file = path.join(prefix, f)
        extractor = DarkBrightDetector(preprocessed, orig, im_file, masks, is_debug = False)
        labels = extractor.find_bright_regions()

        drusen = extractor.get_predicted_region(Labels.Drusen)
        blood = extractor.get_predicted_region(Labels.Haemorage)
        Bc = np.ones((5, 5))

        labels_drusen, n_drusen = mh.label(drusen, Bc)
        labels_blood, n_blood = mh.label(blood, Bc)

        area = float(cv2.countNonZero(extractor.mask))
        outp = np.array([], dtype = np.int)

        # sizes excluding background
        sizes_drusen = mhl.labeled_size(labels_drusen)[1:] / area
        sizes_blood = mhl.labeled_size(labels_blood)[1:] / area

        hist_druzen, _ = np.histogram(sizes_drusen, n_bins, (0, 1e-3))
        hist_blood, _ = np.histogram(sizes_blood, n_bins, (0, 1e-3))

        outp = np.r_[outp, hist_druzen]
        outp = np.r_[outp, hist_blood]
        outp = np.r_[outp, i]

        df.loc[j] = outp
        j += 1
        print "Extracted: {0}, took {1:02.2f} sec ".format(im_file, time.time() - start)

    # write out the csv
    df.to_csv(path.join(output, prefix + ".txt"), index = False, header=False)
    print "Extracted: ", prefix, "@", time_now_str()
mit
ppries/tensorflow
tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py
30
4777
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests feeding functions using arrays and `DataFrames`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff # pylint: disable=g-import-not-at-top try: import pandas as pd HAS_PANDAS = True except ImportError: HAS_PANDAS = False def vals_to_list(a): return {key: val.tolist() if isinstance(val, np.ndarray) else val for key, val in a.items()} class _FeedingFunctionsTestCase(tf.test.TestCase): """Tests for feeding functions.""" def testArrayFeedFnBatchOne(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 1) # cycle around a couple times for x in range(0, 100): i = x % 16 expected = {"index_placeholder": [i], "value_placeholder": [[2 * i, 2 * i + 1]]} actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchFive(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 5) # cycle around a couple times for _ in range(0, 101, 2): aff() expected = {"index_placeholder": [15, 0, 1, 2, 3], "value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]} actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testArrayFeedFnBatchOneHundred(self): array = np.arange(32).reshape([16, 2]) placeholders = ["index_placeholder", "value_placeholder"] aff = ff._ArrayFeedFn(placeholders, array, 100) expected = {"index_placeholder": list(range(0, 16)) * 6 + list(range(0, 4)), "value_placeholder": np.arange(32).reshape([16, 2]).tolist() * 6 + [[0, 1], [2, 3], [4, 5], [6, 7]]} actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchOne(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 1) # cycle around a couple times for x in range(0, 100): i = x % 32 expected = {"index_placeholder": [i + 96], "a_placeholder": [32 + i], "b_placeholder": [64 + i]} actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchFive(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 5) # cycle around a couple times for _ in range(0, 101, 2): aff() expected = {"index_placeholder": [127, 96, 97, 98, 99], "a_placeholder": [63, 32, 33, 34, 35], "b_placeholder": 
[95, 64, 65, 66, 67]} actual = aff() self.assertEqual(expected, vals_to_list(actual)) def testPandasFeedFnBatchOneHundred(self): if not HAS_PANDAS: return array1 = np.arange(32, 64) array2 = np.arange(64, 96) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128)) placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"] aff = ff._PandasFeedFn(placeholders, df, 100) expected = { "index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)), "a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)), "b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68)) } actual = aff() self.assertEqual(expected, vals_to_list(actual)) if __name__ == "__main__": tf.test.main()
apache-2.0
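The row above tests the contrib `_ArrayFeedFn`, which walks through a NumPy array in fixed-size batches and wraps around when it reaches the end. Below is a minimal, framework-free sketch of that cycling behaviour; `make_array_feed_fn` is a hypothetical helper written for this note, not the contrib API itself.

import numpy as np

def make_array_feed_fn(placeholders, array, batch_size):
    """Return a closure producing {placeholder: batch} dicts, cycling
    through `array` and wrapping around at the end (as the tests expect)."""
    cursor = [0]  # mutable position captured by the closure

    def feed_fn():
        rows = [(cursor[0] + k) % len(array) for k in range(batch_size)]
        cursor[0] = (cursor[0] + batch_size) % len(array)
        return {placeholders[0]: rows,
                placeholders[1]: array[rows].tolist()}

    return feed_fn

feed = make_array_feed_fn(["index_placeholder", "value_placeholder"],
                          np.arange(32).reshape(16, 2), 5)
print(feed())  # rows 0-4
print(feed())  # rows 5-9; later calls wrap past row 15 back to row 0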
jart/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/data_feeder_test.py
14
13555
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `DataFeeder`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin # pylint: disable=wildcard-import from tensorflow.contrib.learn.python.learn.learn_io import * from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.lib.io import file_io from tensorflow.python.platform import test # pylint: enable=wildcard-import class DataFeederTest(test.TestCase): # pylint: disable=undefined-variable """Tests for `DataFeeder`.""" def setUp(self): self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir') file_io.create_dir(self._base_dir) def tearDown(self): file_io.delete_recursively(self._base_dir) def _wrap_dict(self, data, prepend=''): return {prepend + '1': data, prepend + '2': data} def _assert_raises(self, input_data): with self.assertRaisesRegexp(TypeError, 'annot convert'): data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1) def _assert_dtype(self, expected_np_dtype, expected_tf_dtype, input_data): feeder = data_feeder.DataFeeder(input_data, None, n_classes=0, batch_size=1) if isinstance(input_data, dict): for v in list(feeder.input_dtype.values()): self.assertEqual(expected_np_dtype, v) else: self.assertEqual(expected_np_dtype, feeder.input_dtype) with ops.Graph().as_default() as g, self.test_session(g): inp, _ = feeder.input_builder() if isinstance(inp, dict): for v in list(inp.values()): self.assertEqual(expected_tf_dtype, v.dtype) else: self.assertEqual(expected_tf_dtype, inp.dtype) def test_input_int8(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.int8) self._assert_dtype(np.int8, dtypes.int8, data) self._assert_dtype(np.int8, dtypes.int8, self._wrap_dict(data)) def test_input_int16(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.int16) self._assert_dtype(np.int16, dtypes.int16, data) self._assert_dtype(np.int16, dtypes.int16, self._wrap_dict(data)) def test_input_int32(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.int32) self._assert_dtype(np.int32, dtypes.int32, data) self._assert_dtype(np.int32, dtypes.int32, self._wrap_dict(data)) def test_input_int64(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.int64) self._assert_dtype(np.int64, dtypes.int64, data) self._assert_dtype(np.int64, dtypes.int64, self._wrap_dict(data)) def test_input_uint32(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.uint32) self._assert_dtype(np.uint32, dtypes.uint32, data) self._assert_dtype(np.uint32, dtypes.uint32, self._wrap_dict(data)) def test_input_uint64(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.uint64) self._assert_dtype(np.uint64, dtypes.uint64, data) self._assert_dtype(np.uint64, dtypes.uint64, self._wrap_dict(data)) def test_input_uint8(self): data 
= np.matrix([[1, 2], [3, 4]], dtype=np.uint8) self._assert_dtype(np.uint8, dtypes.uint8, data) self._assert_dtype(np.uint8, dtypes.uint8, self._wrap_dict(data)) def test_input_uint16(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.uint16) self._assert_dtype(np.uint16, dtypes.uint16, data) self._assert_dtype(np.uint16, dtypes.uint16, self._wrap_dict(data)) def test_input_float16(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.float16) self._assert_dtype(np.float16, dtypes.float16, data) self._assert_dtype(np.float16, dtypes.float16, self._wrap_dict(data)) def test_input_float32(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.float32) self._assert_dtype(np.float32, dtypes.float32, data) self._assert_dtype(np.float32, dtypes.float32, self._wrap_dict(data)) def test_input_float64(self): data = np.matrix([[1, 2], [3, 4]], dtype=np.float64) self._assert_dtype(np.float64, dtypes.float64, data) self._assert_dtype(np.float64, dtypes.float64, self._wrap_dict(data)) def test_input_bool(self): data = np.array([[False for _ in xrange(2)] for _ in xrange(2)]) self._assert_dtype(np.bool, dtypes.bool, data) self._assert_dtype(np.bool, dtypes.bool, self._wrap_dict(data)) def test_input_string(self): input_data = np.array([['str%d' % i for i in xrange(2)] for _ in xrange(2)]) self._assert_dtype(input_data.dtype, dtypes.string, input_data) self._assert_dtype(input_data.dtype, dtypes.string, self._wrap_dict(input_data)) def _assertAllClose(self, src, dest, src_key_of=None, src_prop=None): def func(x): val = getattr(x, src_prop) if src_prop else x return val if src_key_of is None else src_key_of[val] if isinstance(src, dict): for k in list(src.keys()): self.assertAllClose(func(src[k]), dest) else: self.assertAllClose(func(src), dest) def test_unsupervised(self): def func(feeder): with self.test_session(): inp, _ = feeder.input_builder() feed_dict_fn = feeder.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[1, 2]], feed_dict, 'name') data = np.matrix([[1, 2], [2, 3], [3, 4]]) func(data_feeder.DataFeeder(data, None, n_classes=0, batch_size=1)) func( data_feeder.DataFeeder( self._wrap_dict(data), None, n_classes=0, batch_size=1)) def test_data_feeder_regression(self): def func(df): inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name') self._assertAllClose(out, [2, 1], feed_dict, 'name') x = np.matrix([[1, 2], [3, 4]]) y = np.array([1, 2]) func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3)) func( data_feeder.DataFeeder( self._wrap_dict(x, 'in'), self._wrap_dict(y, 'out'), n_classes=self._wrap_dict(0, 'out'), batch_size=3)) def test_epoch(self): def func(feeder): with self.test_session(): feeder.input_builder() epoch = feeder.make_epoch_variable() feed_dict_fn = feeder.get_feed_dict_fn() # First input feed_dict = feed_dict_fn() self.assertAllClose(feed_dict[epoch.name], [0]) # Second input feed_dict = feed_dict_fn() self.assertAllClose(feed_dict[epoch.name], [0]) # Third input feed_dict = feed_dict_fn() self.assertAllClose(feed_dict[epoch.name], [0]) # Back to the first input again, so new epoch. 
feed_dict = feed_dict_fn() self.assertAllClose(feed_dict[epoch.name], [1]) data = np.matrix([[1, 2], [2, 3], [3, 4]]) labels = np.array([0, 0, 1]) func(data_feeder.DataFeeder(data, labels, n_classes=0, batch_size=1)) func( data_feeder.DataFeeder( self._wrap_dict(data, 'in'), self._wrap_dict(labels, 'out'), n_classes=self._wrap_dict(0, 'out'), batch_size=1)) def test_data_feeder_multioutput_regression(self): def func(df): inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name') self._assertAllClose(out, [[3, 4], [1, 2]], feed_dict, 'name') x = np.matrix([[1, 2], [3, 4]]) y = np.array([[1, 2], [3, 4]]) func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=2)) func( data_feeder.DataFeeder( self._wrap_dict(x, 'in'), self._wrap_dict(y, 'out'), n_classes=self._wrap_dict(0, 'out'), batch_size=2)) def test_data_feeder_multioutput_classification(self): def func(df): inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name') self._assertAllClose( out, [[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]], [[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0]]], feed_dict, 'name') x = np.matrix([[1, 2], [3, 4]]) y = np.array([[0, 1, 2], [2, 3, 4]]) func(data_feeder.DataFeeder(x, y, n_classes=5, batch_size=2)) func( data_feeder.DataFeeder( self._wrap_dict(x, 'in'), self._wrap_dict(y, 'out'), n_classes=self._wrap_dict(5, 'out'), batch_size=2)) def test_streaming_data_feeder(self): def func(df): inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[[1, 2]], [[3, 4]]], feed_dict, 'name') self._assertAllClose(out, [[[1], [2]], [[2], [2]]], feed_dict, 'name') def x_iter(wrap_dict=False): yield np.array([[1, 2]]) if not wrap_dict else self._wrap_dict( np.array([[1, 2]]), 'in') yield np.array([[3, 4]]) if not wrap_dict else self._wrap_dict( np.array([[3, 4]]), 'in') def y_iter(wrap_dict=False): yield np.array([[1], [2]]) if not wrap_dict else self._wrap_dict( np.array([[1], [2]]), 'out') yield np.array([[2], [2]]) if not wrap_dict else self._wrap_dict( np.array([[2], [2]]), 'out') func( data_feeder.StreamingDataFeeder( x_iter(), y_iter(), n_classes=0, batch_size=2)) func( data_feeder.StreamingDataFeeder( x_iter(True), y_iter(True), n_classes=self._wrap_dict(0, 'out'), batch_size=2)) # Test non-full batches. func( data_feeder.StreamingDataFeeder( x_iter(), y_iter(), n_classes=0, batch_size=10)) func( data_feeder.StreamingDataFeeder( x_iter(True), y_iter(True), n_classes=self._wrap_dict(0, 'out'), batch_size=10)) def test_dask_data_feeder(self): if HAS_PANDAS and HAS_DASK: x = pd.DataFrame( dict( a=np.array([.1, .3, .4, .6, .2, .1, .6]), b=np.array([.7, .8, .1, .2, .5, .3, .9]))) x = dd.from_pandas(x, npartitions=2) y = pd.DataFrame(dict(labels=np.array([1, 0, 2, 1, 0, 1, 2]))) y = dd.from_pandas(y, npartitions=2) # TODO(ipolosukhin): Remove or restore this. # x = extract_dask_data(x) # y = extract_dask_labels(y) df = data_feeder.DaskDataFeeder(x, y, n_classes=2, batch_size=2) inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self.assertAllClose(feed_dict[inp.name], [[0.40000001, 0.1], [0.60000002, 0.2]]) self.assertAllClose(feed_dict[out.name], [[0., 0., 1.], [0., 1., 0.]]) # TODO(rohanj): Fix this test by fixing data_feeder. 
Currently, h5py doesn't # support permutation based indexing lookups (More documentation at # http://docs.h5py.org/en/latest/high/dataset.html#fancy-indexing) def DISABLED_test_hdf5_data_feeder(self): def func(df): inp, out = df.input_builder() feed_dict_fn = df.get_feed_dict_fn() feed_dict = feed_dict_fn() self._assertAllClose(inp, [[3, 4], [1, 2]], feed_dict, 'name') self.assertAllClose(out, [2, 1], feed_dict, 'name') try: import h5py # pylint: disable=g-import-not-at-top x = np.matrix([[1, 2], [3, 4]]) y = np.array([1, 2]) file_path = os.path.join(self._base_dir, 'test_hdf5.h5') h5f = h5py.File(file_path, 'w') h5f.create_dataset('x', data=x) h5f.create_dataset('y', data=y) h5f.close() h5f = h5py.File(file_path, 'r') x = h5f['x'] y = h5f['y'] func(data_feeder.DataFeeder(x, y, n_classes=0, batch_size=3)) func( data_feeder.DataFeeder( self._wrap_dict(x, 'in'), self._wrap_dict(y, 'out'), n_classes=self._wrap_dict(0, 'out'), batch_size=3)) except ImportError: print("Skipped test for hdf5 since it's not installed.") class SetupPredictDataFeederTest(DataFeederTest): """Tests for `DataFeeder.setup_predict_data_feeder`.""" def test_iterable_data(self): # pylint: disable=undefined-variable def func(df): self._assertAllClose(six.next(df), [[1, 2], [3, 4]]) self._assertAllClose(six.next(df), [[5, 6]]) data = [[1, 2], [3, 4], [5, 6]] x = iter(data) x_dict = iter([self._wrap_dict(v) for v in iter(data)]) func(data_feeder.setup_predict_data_feeder(x, batch_size=2)) func(data_feeder.setup_predict_data_feeder(x_dict, batch_size=2)) if __name__ == '__main__': test.main()
apache-2.0
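Several of the `DataFeeder` tests above (e.g. `test_data_feeder_multioutput_classification`) expect integer labels to be expanded into one-hot vectors when `n_classes` is set. The snippet below is a small NumPy-only sketch of that encoding step, using a hypothetical `one_hot` helper rather than the contrib class.

import numpy as np

def one_hot(labels, n_classes):
    """One-hot encode an integer label array along a new trailing axis."""
    labels = np.asarray(labels)
    out = np.zeros(labels.shape + (n_classes,), dtype=float)
    np.put_along_axis(out, labels[..., None], 1.0, axis=-1)
    return out

y = np.array([[0, 1, 2], [2, 3, 4]])   # two samples, three outputs each
print(one_hot(y, 5))
# sample 0 -> [[1,0,0,0,0], [0,1,0,0,0], [0,0,1,0,0]]
# sample 1 -> [[0,0,1,0,0], [0,0,0,1,0], [0,0,0,0,1]]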
harisbal/pandas
pandas/tests/arrays/sparse/test_libsparse.py
1
21936
import operator import numpy as np import pytest import pandas._libs.sparse as splib import pandas.util._test_decorators as td from pandas import Series from pandas.core.arrays.sparse import BlockIndex, IntIndex, _make_index import pandas.util.testing as tm TEST_LENGTH = 20 plain_case = dict(xloc=[0, 7, 15], xlen=[3, 5, 5], yloc=[2, 9, 14], ylen=[2, 3, 5], intersect_loc=[2, 9, 15], intersect_len=[1, 3, 4]) delete_blocks = dict(xloc=[0, 5], xlen=[4, 4], yloc=[1], ylen=[4], intersect_loc=[1], intersect_len=[3]) split_blocks = dict(xloc=[0], xlen=[10], yloc=[0, 5], ylen=[3, 7], intersect_loc=[0, 5], intersect_len=[3, 5]) skip_block = dict(xloc=[10], xlen=[5], yloc=[0, 12], ylen=[5, 3], intersect_loc=[12], intersect_len=[3]) no_intersect = dict(xloc=[0, 10], xlen=[4, 6], yloc=[5, 17], ylen=[4, 2], intersect_loc=[], intersect_len=[]) def check_cases(_check_case): def _check_case_dict(case): _check_case(case['xloc'], case['xlen'], case['yloc'], case['ylen'], case['intersect_loc'], case['intersect_len']) _check_case_dict(plain_case) _check_case_dict(delete_blocks) _check_case_dict(split_blocks) _check_case_dict(skip_block) _check_case_dict(no_intersect) # one or both is empty _check_case([0], [5], [], [], [], []) _check_case([], [], [], [], [], []) class TestSparseIndexUnion(object): def test_index_make_union(self): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) bresult = xindex.make_union(yindex) assert (isinstance(bresult, BlockIndex)) tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32)) tm.assert_numpy_array_equal(bresult.blengths, np.array(elen, dtype=np.int32)) ixindex = xindex.to_int_index() iyindex = yindex.to_int_index() iresult = ixindex.make_union(iyindex) assert (isinstance(iresult, IntIndex)) tm.assert_numpy_array_equal(iresult.indices, bresult.to_int_index().indices) """ x: ---- y: ---- r: -------- """ xloc = [0] xlen = [5] yloc = [5] ylen = [4] eloc = [0] elen = [9] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ----- ----- y: ----- -- """ xloc = [0, 10] xlen = [5, 5] yloc = [2, 17] ylen = [5, 2] eloc = [0, 10, 17] elen = [7, 5, 2] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ y: ------- r: ---------- """ xloc = [1] xlen = [5] yloc = [3] ylen = [5] eloc = [1] elen = [7] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ ----- y: ------- r: ------------- """ xloc = [2, 10] xlen = [4, 4] yloc = [4] ylen = [8] eloc = [2] elen = [12] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: --- ----- y: ------- r: ------------- """ xloc = [0, 5] xlen = [3, 5] yloc = [0] ylen = [7] eloc = [0] elen = [10] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ------ ----- y: ------- --- r: ------------- """ xloc = [2, 10] xlen = [4, 4] yloc = [4, 13] ylen = [8, 4] eloc = [2] elen = [15] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ---------------------- y: ---- ---- --- r: ---------------------- """ xloc = [2] xlen = [15] yloc = [4, 9, 14] ylen = [3, 2, 2] eloc = [2] elen = [15] _check_case(xloc, xlen, yloc, ylen, eloc, elen) """ x: ---- --- y: --- --- """ xloc = [0, 10] xlen = [3, 3] yloc = [5, 15] ylen = [2, 2] eloc = [0, 5, 10, 15] elen = [3, 2, 3, 2] _check_case(xloc, xlen, yloc, ylen, eloc, elen) def test_intindex_make_union(self): a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32)) b = IntIndex(5, np.array([0, 2], dtype=np.int32)) res = a.make_union(b) exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32)) assert 
res.equals(exp) a = IntIndex(5, np.array([], dtype=np.int32)) b = IntIndex(5, np.array([0, 2], dtype=np.int32)) res = a.make_union(b) exp = IntIndex(5, np.array([0, 2], np.int32)) assert res.equals(exp) a = IntIndex(5, np.array([], dtype=np.int32)) b = IntIndex(5, np.array([], dtype=np.int32)) res = a.make_union(b) exp = IntIndex(5, np.array([], np.int32)) assert res.equals(exp) a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) res = a.make_union(b) exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32)) assert res.equals(exp) a = IntIndex(5, np.array([0, 1], dtype=np.int32)) b = IntIndex(4, np.array([0, 1], dtype=np.int32)) with pytest.raises(ValueError): a.make_union(b) class TestSparseIndexIntersect(object): @td.skip_if_windows def test_intersect(self): def _check_correct(a, b, expected): result = a.intersect(b) assert (result.equals(expected)) def _check_length_exc(a, longer): pytest.raises(Exception, a.intersect, longer) def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) expected = BlockIndex(TEST_LENGTH, eloc, elen) longer_index = BlockIndex(TEST_LENGTH + 1, yloc, ylen) _check_correct(xindex, yindex, expected) _check_correct(xindex.to_int_index(), yindex.to_int_index(), expected.to_int_index()) _check_length_exc(xindex, longer_index) _check_length_exc(xindex.to_int_index(), longer_index.to_int_index()) check_cases(_check_case) def test_intersect_empty(self): xindex = IntIndex(4, np.array([], dtype=np.int32)) yindex = IntIndex(4, np.array([2, 3], dtype=np.int32)) assert xindex.intersect(yindex).equals(xindex) assert yindex.intersect(xindex).equals(xindex) xindex = xindex.to_block_index() yindex = yindex.to_block_index() assert xindex.intersect(yindex).equals(xindex) assert yindex.intersect(xindex).equals(xindex) def test_intersect_identical(self): cases = [IntIndex(5, np.array([1, 2], dtype=np.int32)), IntIndex(5, np.array([0, 2, 4], dtype=np.int32)), IntIndex(0, np.array([], dtype=np.int32)), IntIndex(5, np.array([], dtype=np.int32))] for case in cases: assert case.intersect(case).equals(case) case = case.to_block_index() assert case.intersect(case).equals(case) class TestSparseIndexCommon(object): def test_int_internal(self): idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) idx = _make_index(4, np.array([], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) def test_block_internal(self): idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) idx = _make_index(4, np.array([], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) idx = _make_index(4, np.array([0, 1, 2, 3], 
dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 3 tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) def test_lookup(self): for kind in ['integer', 'block']: idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind) assert idx.lookup(-1) == -1 assert idx.lookup(0) == -1 assert idx.lookup(1) == -1 assert idx.lookup(2) == 0 assert idx.lookup(3) == 1 assert idx.lookup(4) == -1 idx = _make_index(4, np.array([], dtype=np.int32), kind=kind) for i in range(-1, 5): assert idx.lookup(i) == -1 idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) assert idx.lookup(-1) == -1 assert idx.lookup(0) == 0 assert idx.lookup(1) == 1 assert idx.lookup(2) == 2 assert idx.lookup(3) == 3 assert idx.lookup(4) == -1 idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) assert idx.lookup(-1) == -1 assert idx.lookup(0) == 0 assert idx.lookup(1) == -1 assert idx.lookup(2) == 1 assert idx.lookup(3) == 2 assert idx.lookup(4) == -1 def test_lookup_array(self): for kind in ['integer', 'block']: idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) exp = np.array([-1, -1, 0], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) exp = np.array([-1, 0, -1, 1], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) idx = _make_index(4, np.array([], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32)) exp = np.array([-1, -1, -1, -1], dtype=np.int32) idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) exp = np.array([-1, 0, 2], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) exp = np.array([-1, 2, 1, 3], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32)) exp = np.array([1, -1, 2, 0], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32)) exp = np.array([-1, -1, 1, -1], dtype=np.int32) tm.assert_numpy_array_equal(res, exp) def test_lookup_basics(self): def _check(index): assert (index.lookup(0) == -1) assert (index.lookup(5) == 0) assert (index.lookup(7) == 2) assert (index.lookup(8) == -1) assert (index.lookup(9) == -1) assert (index.lookup(10) == -1) assert (index.lookup(11) == -1) assert (index.lookup(12) == 3) assert (index.lookup(17) == 8) assert (index.lookup(18) == -1) bindex = BlockIndex(20, [5, 12], [3, 6]) iindex = bindex.to_int_index() _check(bindex) _check(iindex) # corner cases class TestBlockIndex(object): def test_block_internal(self): idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) idx = _make_index(4, np.array([], dtype=np.int32), 
kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) idx = _make_index(4, np.array([0, 2, 3], dtype=np.int32), kind='block') assert isinstance(idx, BlockIndex) assert idx.npoints == 3 tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) def test_make_block_boundary(self): for i in [5, 10, 100, 101]: idx = _make_index(i, np.arange(0, i, 2, dtype=np.int32), kind='block') exp = np.arange(0, i, 2, dtype=np.int32) tm.assert_numpy_array_equal(idx.blocs, exp) tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32)) def test_equals(self): index = BlockIndex(10, [0, 4], [2, 5]) assert index.equals(index) assert not index.equals(BlockIndex(10, [0, 4], [2, 6])) def test_check_integrity(self): locs = [] lengths = [] # 0-length OK # TODO: index variables are not used...is that right? index = BlockIndex(0, locs, lengths) # noqa # also OK even though empty index = BlockIndex(1, locs, lengths) # noqa # block extend beyond end pytest.raises(Exception, BlockIndex, 10, [5], [10]) # block overlap pytest.raises(Exception, BlockIndex, 10, [2, 5], [5, 3]) def test_to_int_index(self): locs = [0, 10] lengths = [4, 6] exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15] block = BlockIndex(20, locs, lengths) dense = block.to_int_index() tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32)) def test_to_block_index(self): index = BlockIndex(10, [0, 5], [4, 5]) assert index.to_block_index() is index class TestIntIndex(object): def test_check_integrity(self): # Too many indices than specified in self.length msg = "Too many indices" with tm.assert_raises_regex(ValueError, msg): IntIndex(length=1, indices=[1, 2, 3]) # No index can be negative. msg = "No index can be less than zero" with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, -2, 3]) # No index can be negative. msg = "No index can be less than zero" with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, -2, 3]) # All indices must be less than the length. msg = "All indices must be less than the length" with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, 2, 5]) with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, 2, 6]) # Indices must be strictly ascending. 
msg = "Indices must be strictly increasing" with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, 3, 2]) with tm.assert_raises_regex(ValueError, msg): IntIndex(length=5, indices=[1, 3, 3]) def test_int_internal(self): idx = _make_index(4, np.array([2, 3], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 2 tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) idx = _make_index(4, np.array([], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 0 tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) idx = _make_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind='integer') assert isinstance(idx, IntIndex) assert idx.npoints == 4 tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) def test_equals(self): index = IntIndex(10, [0, 1, 2, 3, 4]) assert index.equals(index) assert not index.equals(IntIndex(10, [0, 1, 2, 3])) def test_to_block_index(self): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) # see if survive the round trip xbindex = xindex.to_int_index().to_block_index() ybindex = yindex.to_int_index().to_block_index() assert isinstance(xbindex, BlockIndex) assert xbindex.equals(xindex) assert ybindex.equals(yindex) check_cases(_check_case) def test_to_int_index(self): index = IntIndex(10, [2, 3, 4, 5, 6]) assert index.to_int_index() is index class TestSparseOperators(object): def _op_tests(self, sparse_op, python_op): def _check_case(xloc, xlen, yloc, ylen, eloc, elen): xindex = BlockIndex(TEST_LENGTH, xloc, xlen) yindex = BlockIndex(TEST_LENGTH, yloc, ylen) xdindex = xindex.to_int_index() ydindex = yindex.to_int_index() x = np.arange(xindex.npoints) * 10. + 1 y = np.arange(yindex.npoints) * 100. + 1 xfill = 0 yfill = 2 result_block_vals, rb_index, bfill = sparse_op(x, xindex, xfill, y, yindex, yfill) result_int_vals, ri_index, ifill = sparse_op(x, xdindex, xfill, y, ydindex, yfill) assert rb_index.to_int_index().equals(ri_index) tm.assert_numpy_array_equal(result_block_vals, result_int_vals) assert bfill == ifill # check versus Series... xseries = Series(x, xdindex.indices) xseries = xseries.reindex(np.arange(TEST_LENGTH)).fillna(xfill) yseries = Series(y, ydindex.indices) yseries = yseries.reindex(np.arange(TEST_LENGTH)).fillna(yfill) series_result = python_op(xseries, yseries) series_result = series_result.reindex(ri_index.indices) tm.assert_numpy_array_equal(result_block_vals, series_result.values) tm.assert_numpy_array_equal(result_int_vals, series_result.values) check_cases(_check_case) @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'truediv', 'floordiv']) def test_op(self, opname): sparse_op = getattr(splib, 'sparse_%s_float64' % opname) python_op = getattr(operator, opname) self._op_tests(sparse_op, python_op)
bsd-3-clause
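The `test_to_int_index` case above expects a `BlockIndex` with block starts `[0, 10]` and lengths `[4, 6]` to expand to the explicit indices `[0..3, 10..15]`. Here is a small NumPy-only sketch of that expansion; the hypothetical `blocks_to_indices` stands in for `BlockIndex.to_int_index`.

import numpy as np

def blocks_to_indices(blocs, blengths):
    """Expand a run-length (block) sparse index into explicit integer positions."""
    if len(blocs) == 0:
        return np.array([], dtype=np.int32)
    return np.concatenate([np.arange(loc, loc + length, dtype=np.int32)
                           for loc, length in zip(blocs, blengths)])

print(blocks_to_indices([0, 10], [4, 6]))
# [ 0  1  2  3 10 11 12 13 14 15]  -- the exp_inds checked in test_to_int_index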
OshynSong/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <[email protected]> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
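The key post-processing step in the example above is reordering the shuffled matrix by the fitted row and column labels so the checkerboard becomes visible again. A minimal sketch of that reordering on a toy matrix; the label vectors here are made up and stand in for `model.row_labels_` / `model.column_labels_`.

import numpy as np

rng = np.random.RandomState(0)
data = rng.rand(6, 6)

# hypothetical cluster assignments (the real ones come from the fitted model)
row_labels = np.array([2, 0, 1, 0, 2, 1])
col_labels = np.array([1, 0, 1, 2, 0, 2])

# same trick as in the example: sort rows, then columns, by cluster label
rearranged = data[np.argsort(row_labels)]
rearranged = rearranged[:, np.argsort(col_labels)]
print(rearranged.shape)  # still (6, 6); rows and columns are now grouped by cluster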
jakobworldpeace/scikit-learn
examples/decomposition/plot_pca_vs_lda.py
176
2027
""" ======================================================= Comparison of LDA and PCA 2D projection of Iris dataset ======================================================= The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. Principal Component Analysis (PCA) applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. Linear Discriminant Analysis (LDA) tries to identify attributes that account for the most variance *between classes*. In particular, LDA, in contrast to PCA, is a supervised method, using known class labels. """ print(__doc__) import matplotlib.pyplot as plt from sklearn import datasets from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis iris = datasets.load_iris() X = iris.data y = iris.target target_names = iris.target_names pca = PCA(n_components=2) X_r = pca.fit(X).transform(X) lda = LinearDiscriminantAnalysis(n_components=2) X_r2 = lda.fit(X, y).transform(X) # Percentage of variance explained for each components print('explained variance ratio (first two components): %s' % str(pca.explained_variance_ratio_)) plt.figure() colors = ['navy', 'turquoise', 'darkorange'] lw = 2 for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8, lw=lw, label=target_name) plt.legend(loc='best', shadow=False, scatterpoints=1) plt.title('PCA of IRIS dataset') plt.figure() for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter(X_r2[y == i, 0], X_r2[y == i, 1], alpha=.8, color=color, label=target_name) plt.legend(loc='best', shadow=False, scatterpoints=1) plt.title('LDA of IRIS dataset') plt.show()
bsd-3-clause
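The example above fits both projections on the full Iris data; a natural follow-up is reusing the fitted PCA on new measurements. A short sketch, where the `new_flower` values are made up for illustration:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

iris = datasets.load_iris()
pca = PCA(n_components=2).fit(iris.data)

new_flower = np.array([[5.1, 3.5, 1.4, 0.2]])   # sepal/petal measurements (made up)
print(pca.transform(new_flower))                # 2-D coordinates in PCA space
print(pca.explained_variance_ratio_.sum())      # variance retained by 2 components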
annahs/atmos_research
AL_core_vs_coat_hexbin_long_interval.py
1
4739
import sys import os import numpy as np from pprint import pprint from datetime import datetime from datetime import timedelta import mysql.connector import math import matplotlib.pyplot as plt import matplotlib.colors import matplotlib.cm as cm from matplotlib import dates import calendar #database connection cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon') cursor = cnx.cursor() save_file = False start_date = datetime(2012,4,1) end_date = datetime(2012,5,1) hour_step = 1#336#168 min_BC_VED = 80 max_BC_VED = 220 min_rBC_mass = ((min_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15) max_rBC_mass = ((max_BC_VED/(10.**7))**3)*(math.pi/6.)*1.8*(10.**15) ##############initialize binning variables bins = [] start_size = min_BC_VED #VED in nm end_size = max_BC_VED #VED in nm interval_length = 5 #in nm #create list of size bins while start_size < end_size: bins.append(start_size) start_size += interval_length #create dictionary with size bins as keys binned_data = {} for bin in bins: binned_data[bin] = [0,0] os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/coating data/') new_data = [] while start_date < end_date: print start_date period_end = start_date + timedelta(hours = hour_step) UNIX_start_time = calendar.timegm(start_date.utctimetuple()) UNIX_end_time = calendar.timegm(period_end.utctimetuple()) cursor.execute(('''SELECT rBC_mass_fg,coat_thickness_nm_min,coat_thickness_nm_max,LF_scat_amp,UNIX_UTC_ts FROM alert_leo_coating_data WHERE UNIX_UTC_ts >= %s and UNIX_UTC_ts < %s and HK_flag = 0 and rBC_mass_fg IS NOT NULL LIMIT 10000'''), (UNIX_start_time,UNIX_end_time)) coat_data = cursor.fetchall() #hexbin plot for row in coat_data: mass = row[0] min_coat = row[1] max_coat = row[2] LEO_amp = row[3] UNIX_UTC_ts = row[4] date_time = datetime.utcfromtimestamp(UNIX_UTC_ts) VED = (((mass/(10**15*1.8))*6/math.pi)**(1/3.0))*10**7 for key in binned_data: key_value = float(key) interval_end = key_value + interval_length if VED >= key_value and VED < interval_end: binned_data[key][0] = binned_data[key][0] + 1 if LEO_amp >= 0: binned_data[key][1] = binned_data[key][1] + 1 if min_coat != None and max_coat != None and LEO_amp > 100: if VED < min_BC_VED: continue new_data.append([VED,min_coat,max_coat]) start_date = start_date + timedelta(hours = hour_step) print len(new_data) #fraction detectable fractions_detectable = [] for bin, counts in binned_data.iteritems(): bin_midpoint = bin + interval_length/2.0 total_particles = counts[0] detectable_notches = counts[1] try: fraction_detectable = detectable_notches*1.0/total_particles except: fraction_detectable=np.nan fractions_detectable.append([bin,bin+interval_length,fraction_detectable]) fractions_detectable.sort() bins = [row[0]+interval_length/2 for row in fractions_detectable] fractions = [row[2] for row in fractions_detectable] #number_max = np.max([row[3] for row in fractions_detectable]) #number_particles = [row[3]*1.0/number_max for row in fractions_detectable] core_size = [row[0] for row in new_data] coat_min_size = [row[1] for row in new_data] coat_max_size = [row[2] for row in new_data] #plotting fig = plt.figure(figsize=(8,6)) ax1 = fig.add_subplot(111) min = ax1.hexbin(core_size, coat_min_size, cmap=cm.jet, gridsize = 40,mincnt=1)#, norm= norm) #bins='log', norm=norm #ax1.hist2d(core_size, coat_size, bins = 60) ax1.set_xlabel('rBC core diameter') ax1.set_ylabel('Coating thickness') ax1.set_ylim(-30,220) ax1.set_xlim(min_BC_VED,max_BC_VED) fig.subplots_adjust(right=0.8) cbar_ax = 
fig.add_axes([0.9, 0.15, 0.02, 0.7]) cbar = fig.colorbar(min, cax=cbar_ax) cbar.ax.get_yaxis().labelpad = 10 cbar.set_label('# of particles') ax2 = ax1.twinx() ax2.scatter(bins, fractions, color = 'r') #ax2.scatter(bins, number_particles, color = 'k') ax2.set_ylabel('fraction of detectable notch positions',color='r') ax2.set_ylim(0,1) plt.xlim(min_BC_VED,max_BC_VED) #ax3 = fig.add_subplot(212) #ax3.hexbin(core_size, coat_max_size, cmap=cm.jet, gridsize = 40,mincnt=1)#, norm= norm) #bins='log', norm=norm #ax3.set_xlabel('rBC core diameter') #ax3.set_ylabel('Maximum coating thickness') #ax3.set_ylim(-30,220) #ax3.set_xlim(min_BC_VED,max_BC_VED) # #ax4 = ax3.twinx() #ax4.scatter(bins, fractions, color = 'r') ##ax4.scatter(bins, number_particles, color = 'k') #ax4.set_ylabel('fraction of detectable notch positions',color='r') #ax4.set_ylim(0,1) #plt.xlim(min_BC_VED,max_BC_VED) plt.savefig('C:/Users/Sarah Hanna/Documents/Data/Alert Data/seasonal coating/April 2012.png', bbox_inches='tight') plt.show() cnx.close()
mit
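The script above converts between rBC mass and volume-equivalent diameter (VED) assuming spherical particles with density 1.8 g cm^-3; the expressions for `min_rBC_mass` and `VED` are the forward and inverse forms of the same relation. A small worked restatement of those two formulas:

import math

RHO_RBC = 1.8  # g cm^-3, the density assumed in the script above

def mass_fg_from_ved_nm(ved_nm):
    """rBC mass (fg) of a sphere with volume-equivalent diameter `ved_nm`."""
    d_cm = ved_nm / 1e7                                   # nm -> cm
    return (math.pi / 6.0) * d_cm ** 3 * RHO_RBC * 1e15   # g -> fg

def ved_nm_from_mass_fg(mass_fg):
    """Invert the relation: volume-equivalent diameter (nm) from mass (fg)."""
    volume_cm3 = mass_fg / (1e15 * RHO_RBC)
    return (6.0 * volume_cm3 / math.pi) ** (1.0 / 3.0) * 1e7

m = mass_fg_from_ved_nm(80.0)
print(m)                       # ~0.48 fg for an 80 nm core
print(ved_nm_from_mass_fg(m))  # round-trips back to ~80 nm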
kagayakidan/scikit-learn
sklearn/metrics/pairwise.py
49
44088
# -*- coding: utf-8 -*- # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Robert Layton <[email protected]> # Andreas Mueller <[email protected]> # Philippe Gervais <[email protected]> # Lars Buitinck <[email protected]> # Joel Nothman <[email protected]> # License: BSD 3 clause import itertools import numpy as np from scipy.spatial import distance from scipy.sparse import csr_matrix from scipy.sparse import issparse from ..utils import check_array from ..utils import gen_even_slices from ..utils import gen_batches from ..utils.fixes import partial from ..utils.extmath import row_norms, safe_sparse_dot from ..preprocessing import normalize from ..externals.joblib import Parallel from ..externals.joblib import delayed from ..externals.joblib.parallel import cpu_count from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan # Utility Functions def _return_float_dtype(X, Y): """ 1. If dtype of X and Y is float32, then dtype float32 is returned. 2. Else dtype float is returned. """ if not issparse(X) and not isinstance(X, np.ndarray): X = np.asarray(X) if Y is None: Y_dtype = X.dtype elif not issparse(Y) and not isinstance(Y, np.ndarray): Y = np.asarray(Y) Y_dtype = Y.dtype else: Y_dtype = Y.dtype if X.dtype == Y_dtype == np.float32: dtype = np.float32 else: dtype = np.float return X, Y, dtype def check_pairwise_arrays(X, Y, precomputed=False): """ Set X and Y appropriately and checks inputs If Y is None, it is set as a pointer to X (i.e. not a copy). If Y is given, this does not happen. All distance metrics should use this function first to assert that the given parameters are correct and safe to use. Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the second dimension of the two arrays is equal, or the equivalent check for a precomputed distance matrix. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) precomputed : bool True if X is to be treated as precomputed distances to the samples in Y. Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y, dtype = _return_float_dtype(X, Y) if Y is X or Y is None: X = Y = check_array(X, accept_sparse='csr', dtype=dtype) else: X = check_array(X, accept_sparse='csr', dtype=dtype) Y = check_array(Y, accept_sparse='csr', dtype=dtype) if precomputed: if X.shape[1] != Y.shape[0]: raise ValueError("Precomputed metric requires shape " "(n_queries, n_indexed). Got (%d, %d) " "for %d indexed." % (X.shape[0], X.shape[1], Y.shape[0])) elif X.shape[1] != Y.shape[1]: raise ValueError("Incompatible dimension for X and Y matrices: " "X.shape[1] == %d while Y.shape[1] == %d" % ( X.shape[1], Y.shape[1])) return X, Y def check_paired_arrays(X, Y): """ Set X and Y appropriately and checks inputs for paired distances All paired distance metrics should use this function first to assert that the given parameters are correct and safe to use. 
Specifically, this function first ensures that both X and Y are arrays, then checks that they are at least two dimensional while ensuring that their elements are floats. Finally, the function checks that the size of the dimensions of the two arrays are equal. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_a, n_features) Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) Returns ------- safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features) An array equal to X, guaranteed to be a numpy array. safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features) An array equal to Y if Y was not None, guaranteed to be a numpy array. If Y was None, safe_Y will be a pointer to X. """ X, Y = check_pairwise_arrays(X, Y) if X.shape != Y.shape: raise ValueError("X and Y should be of same shape. They were " "respectively %r and %r long." % (X.shape, Y.shape)) return X, Y # Pairwise distances def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False, X_norm_squared=None): """ Considering the rows of X (and Y=X) as vectors, compute the distance matrix between each pair of vectors. For efficiency reasons, the euclidean distance between a pair of row vector x and y is computed as:: dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) This formulation has two advantages over other ways of computing distances. First, it is computationally efficient when dealing with sparse data. Second, if one argument varies but the other remains unchanged, then `dot(x, x)` and/or `dot(y, y)` can be pre-computed. However, this is not the most precise way of doing this computation, and the distance matrix returned by this function may not be exactly symmetric as required by, e.g., ``scipy.spatial.distance`` functions. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples_1, n_features) Y : {array-like, sparse matrix}, shape (n_samples_2, n_features) Y_norm_squared : array-like, shape (n_samples_2, ), optional Pre-computed dot-products of vectors in Y (e.g., ``(Y**2).sum(axis=1)``) squared : boolean, optional Return squared Euclidean distances. X_norm_squared : array-like, shape = [n_samples_1], optional Pre-computed dot-products of vectors in X (e.g., ``(X**2).sum(axis=1)``) Returns ------- distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2) Examples -------- >>> from sklearn.metrics.pairwise import euclidean_distances >>> X = [[0, 1], [1, 1]] >>> # distance between rows of X >>> euclidean_distances(X, X) array([[ 0., 1.], [ 1., 0.]]) >>> # get distance to origin >>> euclidean_distances(X, [[0, 0]]) array([[ 1. ], [ 1.41421356]]) See also -------- paired_distances : distances betweens pairs of elements of X and Y. 
""" X, Y = check_pairwise_arrays(X, Y) if X_norm_squared is not None: XX = check_array(X_norm_squared) if XX.shape == (1, X.shape[0]): XX = XX.T elif XX.shape != (X.shape[0], 1): raise ValueError( "Incompatible dimensions for X and X_norm_squared") else: XX = row_norms(X, squared=True)[:, np.newaxis] if X is Y: # shortcut in the common case euclidean_distances(X, X) YY = XX.T elif Y_norm_squared is not None: YY = np.atleast_2d(Y_norm_squared) if YY.shape != (1, Y.shape[0]): raise ValueError( "Incompatible dimensions for Y and Y_norm_squared") else: YY = row_norms(Y, squared=True)[np.newaxis, :] distances = safe_sparse_dot(X, Y.T, dense_output=True) distances *= -2 distances += XX distances += YY np.maximum(distances, 0, out=distances) if X is Y: # Ensure that distances between vectors and themselves are set to 0.0. # This may not be the case due to floating point rounding errors. distances.flat[::distances.shape[0] + 1] = 0.0 return distances if squared else np.sqrt(distances, out=distances) def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). The minimal distances are also returned. This is mostly equivalent to calling: (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis), pairwise_distances(X, Y=Y, metric=metric).min(axis=axis)) but uses much less memory, and is faster for large arrays. Parameters ---------- X, Y : {array-like, sparse matrix} Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable, default 'euclidean' metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict, optional Keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. distances : numpy.ndarray distances[i] is the distance between the i-th row in X and the argmin[i]-th row in Y. 
See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin """ dist_func = None if metric in PAIRWISE_DISTANCE_FUNCTIONS: dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif not callable(metric) and not isinstance(metric, str): raise ValueError("'metric' must be a string or a callable") X, Y = check_pairwise_arrays(X, Y) if metric_kwargs is None: metric_kwargs = {} if axis == 0: X, Y = Y, X # Allocate output arrays indices = np.empty(X.shape[0], dtype=np.intp) values = np.empty(X.shape[0]) values.fill(np.infty) for chunk_x in gen_batches(X.shape[0], batch_size): X_chunk = X[chunk_x, :] for chunk_y in gen_batches(Y.shape[0], batch_size): Y_chunk = Y[chunk_y, :] if dist_func is not None: if metric == 'euclidean': # special case, for speed d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True) d_chunk *= -2 d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis] d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :] np.maximum(d_chunk, 0, d_chunk) else: d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs) else: d_chunk = pairwise_distances(X_chunk, Y_chunk, metric=metric, **metric_kwargs) # Update indices and minimum values using chunk min_indices = d_chunk.argmin(axis=1) min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start), min_indices] flags = values[chunk_x] > min_values indices[chunk_x][flags] = min_indices[flags] + chunk_y.start values[chunk_x][flags] = min_values[flags] if metric == "euclidean" and not metric_kwargs.get("squared", False): np.sqrt(values, values) return indices, values def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean", batch_size=500, metric_kwargs=None): """Compute minimum distances between one point and a set of points. This function computes for each row in X, the index of the row of Y which is closest (according to the specified distance). This is mostly equivalent to calling: pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis) but uses much less memory, and is faster for large arrays. This function works with dense 2D arrays only. Parameters ---------- X : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) Y : array-like Arrays containing points. Respective shapes (n_samples1, n_features) and (n_samples2, n_features) batch_size : integer To reduce memory consumption over the naive solution, data are processed in batches, comprising batch_size rows of X and batch_size rows of Y. The default value is quite conservative, but can be changed for fine-tuning. The larger the number, the larger the memory usage. metric : string or callable metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. 
Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_kwargs : dict keyword arguments to pass to specified metric function. axis : int, optional, default 1 Axis along which the argmin and distances are to be computed. Returns ------- argmin : numpy.ndarray Y[argmin[i], :] is the row in Y that is closest to X[i, :]. See also -------- sklearn.metrics.pairwise_distances sklearn.metrics.pairwise_distances_argmin_min """ if metric_kwargs is None: metric_kwargs = {} return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size, metric_kwargs)[0] def manhattan_distances(X, Y=None, sum_over_features=True, size_threshold=5e8): """ Compute the L1 distances between the vectors in X and Y. With sum_over_features equal to False it returns the componentwise distances. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like An array with shape (n_samples_X, n_features). Y : array_like, optional An array with shape (n_samples_Y, n_features). sum_over_features : bool, default=True If True the function returns the pairwise distance matrix else it returns the componentwise L1 pairwise-distances. Not supported for sparse matrix inputs. size_threshold : int, default=5e8 Unused parameter. Returns ------- D : array If sum_over_features is False shape is (n_samples_X * n_samples_Y, n_features) and D contains the componentwise L1 pairwise-distances (ie. absolute difference), else shape is (n_samples_X, n_samples_Y) and D contains the pairwise L1 distances. Examples -------- >>> from sklearn.metrics.pairwise import manhattan_distances >>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS array([[ 0.]]) >>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS array([[ 1.]]) >>> manhattan_distances([[1, 2], [3, 4]],\ [[1, 2], [0, 3]])#doctest:+ELLIPSIS array([[ 0., 2.], [ 4., 4.]]) >>> import numpy as np >>> X = np.ones((1, 2)) >>> y = 2 * np.ones((2, 2)) >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS array([[ 1., 1.], [ 1., 1.]]...) """ X, Y = check_pairwise_arrays(X, Y) if issparse(X) or issparse(Y): if not sum_over_features: raise TypeError("sum_over_features=%r not supported" " for sparse matrices" % sum_over_features) X = csr_matrix(X, copy=False) Y = csr_matrix(Y, copy=False) D = np.zeros((X.shape[0], Y.shape[0])) _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, X.shape[1], D) return D if sum_over_features: return distance.cdist(X, Y, 'cityblock') D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] D = np.abs(D, D) return D.reshape((-1, X.shape[1])) def cosine_distances(X, Y=None): """ Compute cosine distance between samples in X and Y. Cosine distance is defined as 1.0 minus the cosine similarity. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array_like, sparse matrix with shape (n_samples_X, n_features). Y : array_like, sparse matrix (optional) with shape (n_samples_Y, n_features). Returns ------- distance matrix : array An array with shape (n_samples_X, n_samples_Y). 
See also -------- sklearn.metrics.pairwise.cosine_similarity scipy.spatial.distance.cosine (dense matrices only) """ # 1.0 - cosine_similarity(X, Y) without copy S = cosine_similarity(X, Y) S *= -1 S += 1 return S # Paired distances def paired_euclidean_distances(X, Y): """ Computes the paired euclidean distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) return row_norms(X - Y) def paired_manhattan_distances(X, Y): """Compute the L1 distances between the vectors in X and Y. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray (n_samples, ) """ X, Y = check_paired_arrays(X, Y) diff = X - Y if issparse(diff): diff.data = np.abs(diff.data) return np.squeeze(np.array(diff.sum(axis=1))) else: return np.abs(diff).sum(axis=-1) def paired_cosine_distances(X, Y): """ Computes the paired cosine distances between X and Y Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array-like, shape (n_samples, n_features) Y : array-like, shape (n_samples, n_features) Returns ------- distances : ndarray, shape (n_samples, ) Notes ------ The cosine distance is equivalent to the half the squared euclidean distance if each sample is normalized to unit norm """ X, Y = check_paired_arrays(X, Y) return .5 * row_norms(normalize(X) - normalize(Y), squared=True) PAIRED_DISTANCES = { 'cosine': paired_cosine_distances, 'euclidean': paired_euclidean_distances, 'l2': paired_euclidean_distances, 'l1': paired_manhattan_distances, 'manhattan': paired_manhattan_distances, 'cityblock': paired_manhattan_distances} def paired_distances(X, Y, metric="euclidean", **kwds): """ Computes the paired distances between X and Y. Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc... Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : ndarray (n_samples, n_features) Array 1 for distance computation. Y : ndarray (n_samples, n_features) Array 2 for distance computation. metric : string or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options specified in PAIRED_DISTANCES, including "euclidean", "manhattan", or "cosine". Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. Returns ------- distances : ndarray (n_samples, ) Examples -------- >>> from sklearn.metrics.pairwise import paired_distances >>> X = [[0, 1], [1, 1]] >>> Y = [[0, 1], [2, 1]] >>> paired_distances(X, Y) array([ 0., 1.]) See also -------- pairwise_distances : pairwise distances. """ if metric in PAIRED_DISTANCES: func = PAIRED_DISTANCES[metric] return func(X, Y) elif callable(metric): # Check the matrix first (it is usually done by the metric) X, Y = check_paired_arrays(X, Y) distances = np.zeros(len(X)) for i in range(len(X)): distances[i] = metric(X[i], Y[i]) return distances else: raise ValueError('Unknown distance %s' % metric) # Kernels def linear_kernel(X, Y=None): """ Compute the linear kernel between X and Y. Read more in the :ref:`User Guide <linear_kernel>`. 
Parameters ---------- X : array of shape (n_samples_1, n_features) Y : array of shape (n_samples_2, n_features) Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) return safe_sparse_dot(X, Y.T, dense_output=True) def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1): """ Compute the polynomial kernel between X and Y:: K(X, Y) = (gamma <X, Y> + coef0)^degree Read more in the :ref:`User Guide <polynomial_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 degree : int, default 3 Returns ------- Gram matrix : array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 K **= degree return K def sigmoid_kernel(X, Y=None, gamma=None, coef0=1): """ Compute the sigmoid kernel between X and Y:: K(X, Y) = tanh(gamma <X, Y> + coef0) Read more in the :ref:`User Guide <sigmoid_kernel>`. Parameters ---------- X : ndarray of shape (n_samples_1, n_features) Y : ndarray of shape (n_samples_2, n_features) coef0 : int, default 1 Returns ------- Gram matrix: array of shape (n_samples_1, n_samples_2) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = safe_sparse_dot(X, Y.T, dense_output=True) K *= gamma K += coef0 np.tanh(K, K) # compute tanh in-place return K def rbf_kernel(X, Y=None, gamma=None): """ Compute the rbf (gaussian) kernel between X and Y:: K(x, y) = exp(-gamma ||x-y||^2) for each pair of rows x in X and y in Y. Read more in the :ref:`User Guide <rbf_kernel>`. Parameters ---------- X : array of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) """ X, Y = check_pairwise_arrays(X, Y) if gamma is None: gamma = 1.0 / X.shape[1] K = euclidean_distances(X, Y, squared=True) K *= -gamma np.exp(K, K) # exponentiate K in-place return K def cosine_similarity(X, Y=None, dense_output=True): """Compute cosine similarity between samples in X and Y. Cosine similarity, or the cosine kernel, computes similarity as the normalized dot product of X and Y: K(X, Y) = <X, Y> / (||X||*||Y||) On L2-normalized data, this function is equivalent to linear_kernel. Read more in the :ref:`User Guide <cosine_similarity>`. Parameters ---------- X : ndarray or sparse array, shape: (n_samples_X, n_features) Input data. Y : ndarray or sparse array, shape: (n_samples_Y, n_features) Input data. If ``None``, the output will be the pairwise similarities between all samples in ``X``. dense_output : boolean (optional), default True Whether to return dense output even when the input is sparse. If ``False``, the output is sparse if both input arrays are sparse. Returns ------- kernel matrix : array An array with shape (n_samples_X, n_samples_Y). """ # to avoid recursive import X, Y = check_pairwise_arrays(X, Y) X_normalized = normalize(X, copy=True) if X is Y: Y_normalized = X_normalized else: Y_normalized = normalize(Y, copy=True) K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output) return K def additive_chi2_kernel(X, Y=None): """Computes the additive chi-squared kernel between observations in X and Y The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. 
The chi-squared kernel is given by:: k(x, y) = -Sum [(x - y)^2 / (x + y)] It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Notes ----- As the negative of a distance, this kernel is only conditionally positive definite. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- chi2_kernel : The exponentiated version of the kernel, which is usually preferable. sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to this kernel. """ if issparse(X) or issparse(Y): raise ValueError("additive_chi2 does not support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if (X < 0).any(): raise ValueError("X contains negative values.") if Y is not X and (Y < 0).any(): raise ValueError("Y contains negative values.") result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype) _chi2_kernel_fast(X, Y, result) return result def chi2_kernel(X, Y=None, gamma=1.): """Computes the exponential chi-squared kernel X and Y. The chi-squared kernel is computed between each pair of rows in X and Y. X and Y have to be non-negative. This kernel is most commonly applied to histograms. The chi-squared kernel is given by:: k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)]) It can be interpreted as a weighted difference per entry. Read more in the :ref:`User Guide <chi2_kernel>`. Parameters ---------- X : array-like of shape (n_samples_X, n_features) Y : array of shape (n_samples_Y, n_features) gamma : float, default=1. Scaling parameter of the chi2 kernel. Returns ------- kernel_matrix : array of shape (n_samples_X, n_samples_Y) References ---------- * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C. Local features and kernels for classification of texture and object categories: A comprehensive study International Journal of Computer Vision 2007 http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf See also -------- additive_chi2_kernel : The additive version of this kernel sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation to the additive version of this kernel. """ K = additive_chi2_kernel(X, Y) K *= gamma return np.exp(K, K) # Helper functions - distance PAIRWISE_DISTANCE_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 'cityblock': manhattan_distances, 'cosine': cosine_distances, 'euclidean': euclidean_distances, 'l2': euclidean_distances, 'l1': manhattan_distances, 'manhattan': manhattan_distances, 'precomputed': None, # HACK: precomputed is always allowed, never called } def distance_metrics(): """Valid metrics for pairwise_distances. This function simply returns the valid pairwise distance metrics. It exists to allow for a description of the mapping for each of the valid strings. 
The valid distance metrics, and the function they map to, are: ============ ==================================== metric Function ============ ==================================== 'cityblock' metrics.pairwise.manhattan_distances 'cosine' metrics.pairwise.cosine_distances 'euclidean' metrics.pairwise.euclidean_distances 'l1' metrics.pairwise.manhattan_distances 'l2' metrics.pairwise.euclidean_distances 'manhattan' metrics.pairwise.manhattan_distances ============ ==================================== Read more in the :ref:`User Guide <metrics>`. """ return PAIRWISE_DISTANCE_FUNCTIONS def _parallel_pairwise(X, Y, func, n_jobs, **kwds): """Break the pairwise matrix in n_jobs even slices and compute them in parallel""" if n_jobs < 0: n_jobs = max(cpu_count() + 1 + n_jobs, 1) if Y is None: Y = X if n_jobs == 1: # Special case to avoid picklability checks in delayed return func(X, Y, **kwds) # TODO: in some cases, backend='threading' may be appropriate fd = delayed(func) ret = Parallel(n_jobs=n_jobs, verbose=0)( fd(X, Y[s], **kwds) for s in gen_even_slices(Y.shape[0], n_jobs)) return np.hstack(ret) def _pairwise_callable(X, Y, metric, **kwds): """Handle the callable case for pairwise_{distances,kernels} """ X, Y = check_pairwise_arrays(X, Y) if X is Y: # Only calculate metric for upper triangle out = np.zeros((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.combinations(range(X.shape[0]), 2) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) # Make symmetric # NB: out += out.T will produce incorrect results out = out + out.T # Calculate diagonal # NB: nonzero diagonals are allowed for both metrics and kernels for i in range(X.shape[0]): x = X[i] out[i, i] = metric(x, x, **kwds) else: # Calculate all cells out = np.empty((X.shape[0], Y.shape[0]), dtype='float') iterator = itertools.product(range(X.shape[0]), range(Y.shape[0])) for i, j in iterator: out[i, j] = metric(X[i], Y[j], **kwds) return out _VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock', 'braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"] def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds): """ Compute the distance matrix from a vector array X and optional Y. This method takes either a vector array or a distance matrix, and returns a distance matrix. If the input is a vector array, the distances are computed. If the input is a distances matrix, it is returned instead. This method provides a safe way to take a distance matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise distance between the arrays from both X and Y. Valid values for metric are: - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan']. These metrics support sparse matrix inputs. - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. These metrics do not support sparse matrix inputs. 
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are valid scipy.spatial.distance metrics), the scikit-learn implementation will be used, which is faster and has support for sparse matrices (except for 'cityblock'). For a verbose description of the metrics from scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics function. Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. Y : array [n_samples_b, n_features], optional An optional second feature array. Only allowed if metric != "precomputed". metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by scipy.spatial.distance.pdist for its metric parameter, or a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS. If metric is "precomputed", X is assumed to be a distance matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. `**kwds` : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A distance matrix D such that D_{i, j} is the distance between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then D_{i, j} is the distance between the ith array from X and the jth array from Y. """ if (metric not in _VALID_METRICS and not callable(metric) and metric != "precomputed"): raise ValueError("Unknown metric %s. " "Valid metrics are %s, or 'precomputed', or a " "callable" % (metric, _VALID_METRICS)) if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif metric in PAIRWISE_DISTANCE_FUNCTIONS: func = PAIRWISE_DISTANCE_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: if issparse(X) or issparse(Y): raise TypeError("scipy distance metrics do not" " support sparse matrices.") X, Y = check_pairwise_arrays(X, Y) if n_jobs == 1 and X is Y: return distance.squareform(distance.pdist(X, metric=metric, **kwds)) func = partial(distance.cdist, metric=metric, **kwds) return _parallel_pairwise(X, Y, func, n_jobs, **kwds) # Helper functions - distance PAIRWISE_KERNEL_FUNCTIONS = { # If updating this dictionary, update the doc in both distance_metrics() # and also in pairwise_distances()! 
'additive_chi2': additive_chi2_kernel, 'chi2': chi2_kernel, 'linear': linear_kernel, 'polynomial': polynomial_kernel, 'poly': polynomial_kernel, 'rbf': rbf_kernel, 'sigmoid': sigmoid_kernel, 'cosine': cosine_similarity, } def kernel_metrics(): """ Valid metrics for pairwise_kernels This function simply returns the valid pairwise distance metrics. It exists, however, to allow for a verbose description of the mapping for each of the valid strings. The valid distance metrics, and the function they map to, are: =============== ======================================== metric Function =============== ======================================== 'additive_chi2' sklearn.pairwise.additive_chi2_kernel 'chi2' sklearn.pairwise.chi2_kernel 'linear' sklearn.pairwise.linear_kernel 'poly' sklearn.pairwise.polynomial_kernel 'polynomial' sklearn.pairwise.polynomial_kernel 'rbf' sklearn.pairwise.rbf_kernel 'sigmoid' sklearn.pairwise.sigmoid_kernel 'cosine' sklearn.pairwise.cosine_similarity =============== ======================================== Read more in the :ref:`User Guide <metrics>`. """ return PAIRWISE_KERNEL_FUNCTIONS KERNEL_PARAMS = { "additive_chi2": (), "chi2": (), "cosine": (), "exp_chi2": frozenset(["gamma"]), "linear": (), "poly": frozenset(["gamma", "degree", "coef0"]), "polynomial": frozenset(["gamma", "degree", "coef0"]), "rbf": frozenset(["gamma"]), "sigmoid": frozenset(["gamma", "coef0"]), } def pairwise_kernels(X, Y=None, metric="linear", filter_params=False, n_jobs=1, **kwds): """Compute the kernel between arrays X and optional array Y. This method takes either a vector array or a kernel matrix, and returns a kernel matrix. If the input is a vector array, the kernels are computed. If the input is a kernel matrix, it is returned instead. This method provides a safe way to take a kernel matrix as input, while preserving compatibility with many other algorithms that take a vector array. If Y is given (default is None), then the returned matrix is the pairwise kernel between the arrays from both X and Y. Valid values for metric are:: ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine'] Read more in the :ref:`User Guide <metrics>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise kernels between samples, or a feature array. Y : array [n_samples_b, n_features] A second feature array only if X has shape [n_samples_a, n_features]. metric : string, or callable The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. n_jobs : int The number of jobs to use for the computation. This works by breaking down the pairwise matrix into n_jobs even slices and computing them in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. filter_params: boolean Whether to filter invalid parameters or not. `**kwds` : optional keyword parameters Any further parameters are passed directly to the kernel function. 
Returns ------- K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b] A kernel matrix K such that K_{i, j} is the kernel between the ith and jth vectors of the given matrix X, if Y is None. If Y is not None, then K_{i, j} is the kernel between the ith array from X and the jth array from Y. Notes ----- If metric is 'precomputed', Y is ignored and X is returned. """ if metric == "precomputed": X, _ = check_pairwise_arrays(X, Y, precomputed=True) return X elif metric in PAIRWISE_KERNEL_FUNCTIONS: if filter_params: kwds = dict((k, kwds[k]) for k in kwds if k in KERNEL_PARAMS[metric]) func = PAIRWISE_KERNEL_FUNCTIONS[metric] elif callable(metric): func = partial(_pairwise_callable, metric=metric, **kwds) else: raise ValueError("Unknown kernel %r" % metric) return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
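# ---------------------------------------------------------------------------
# Minimal usage sketch for the pairwise helpers defined above. The arrays and
# the gamma value are illustrative only; every function and keyword used here
# is part of this module's public interface.
import numpy as np
from sklearn.metrics.pairwise import (pairwise_distances, pairwise_kernels,
                                      paired_distances, cosine_similarity)

X = np.array([[0., 1.], [1., 1.], [2., 0.]])   # (n_samples_X, n_features)
Y = np.array([[1., 0.], [0., 0.]])             # (n_samples_Y, n_features)

# Full distance matrix, shape (3, 2); 'manhattan' dispatches to
# manhattan_distances through PAIRWISE_DISTANCE_FUNCTIONS.
D = pairwise_distances(X, Y, metric='manhattan')

# Kernel matrix, shape (3, 2); extra keyword arguments such as gamma are
# forwarded to rbf_kernel.
K = pairwise_kernels(X, Y, metric='rbf', gamma=0.5)

# Row-wise ("paired") distances require X and Y of equal length.
d = paired_distances(X[:2], Y, metric='euclidean')      # shape (2,)

# cosine_similarity is the normalized dot product; cosine_distances above is
# simply 1.0 minus this matrix.
S = cosine_similarity(X, Y)

# A callable metric is evaluated pair by pair through _pairwise_callable.
D_inf = pairwise_distances(X, Y, metric=lambda a, b: np.abs(a - b).max())
# ---------------------------------------------------------------------------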
bsd-3-clause
CodeForPhilly/chime
src/chime_dash/app/utils/templates.py
1
7202
"""utils/templates Utility functions for localization templates templates themselves can be found in app/templates/en """ import dash_daq as daq from typing import Dict, Any, Optional from os import path from yaml import safe_load from numpy import mod from pandas import DataFrame from dash_html_components import Table, Thead, Tbody, Tr, Td, Th, H4, Hr from dash_core_components import DatePickerSingle from dash_bootstrap_components import FormGroup, Label, Input, Checklist from penn_chime.model.parameters import Parameters # Consider moving this to a config file eventually TEMPLATE_DIR = path.join( path.abspath(path.dirname(path.dirname(__file__))), "templates" ) LABEL_STYLE = {"fontSize": "0.875rem", "marginBottom": "0.3333em"} HEADER_STYLE = { "fontSize": "1rem", "fontWeight": "bold", "margin-bottom": "1rem", } LINE_STYLE = { "width": "30%", "text-align": "center", "color": "#cccccc" } def read_localization_yml(file: str, language: str) -> Dict[str, Any]: """Reads localization template. Arguments: file: Name of the section plus `.yml` language: Localization info Raises: KeyError: If no template for file/language exists. """ file_address = path.join(TEMPLATE_DIR, language, file) if not path.exists(file_address): raise KeyError( "No template found for language '{language}' and section '{file}'".format( file=file, language=language ) ) with open(file_address, "r") as stream: yaml = safe_load(stream) return yaml def read_localization_markdown(file: str, language: str) -> str: """Reads localization template. Arguments: file: Name of the section plus `.md` language: Localization info Raises: KeyError: If no template for file/language exists. """ file_address = path.join(TEMPLATE_DIR, language, file) if not path.exists(file_address): raise KeyError( "No template found for langage '{language}' and section '{file}'".format( file=file, language=language ) ) with open(file_address, "r") as stream: md = stream.read() return md def df_to_html_table( dataframe: DataFrame, data_only: bool = False, n_mod: Optional[int] = None, formats: Optional[Dict[Any, str]] = None, ) -> Table: """Converts pandas data frame to html table """ formats = formats or {} def cast_type(val): for dtype, cast in formats.items(): if isinstance(val, dtype): try: val = cast(val) break except ValueError: break return val index_name = dataframe.index.name index_name = index_name or "#" tmp = dataframe.copy() if n_mod is not None: tmp = tmp[mod(tmp.index, n_mod) == 0].copy() data = [ Thead([Tr([Th(index_name)] + [Th(col) for col in tmp.columns])]), Tbody( [ Tr([Th(cast_type(idx))] + [Td(cast_type(col)) for col in row]) for idx, row in tmp.iterrows() ] ), ] return data if data_only else Table(data) def create_number_input( idx: str, data: Dict[str, Any], content: Dict[str, str], defaults: Parameters, debounce: bool = True, ): """Returns number formgroup for given form data. Arguments: idx: The name of the varibale (html id) data: Input form kwargs. 
content: Localization text defaults: Parameters to infer defaults debounce: Trigger callback on enter or unfocus """ input_kwargs = data.copy() input_kwargs.pop("percent", None) if not "value" in input_kwargs: input_kwargs["value"] = _get_default_values( idx, defaults, min_val=data.get("min", None), max_val=data.get("max", None) ) return FormGroup( id=f'group_{idx}', children=[ Label(html_for=idx, children=content[idx], style=LABEL_STYLE), Input(id=idx, debounce=debounce, **input_kwargs), ] ) def create_header(idx: str, content: Dict[str, str]): """ Create heading element using localization map """ return H4(id=idx, children=content[idx], style=HEADER_STYLE) def create_line_break(idx: str): return Hr(id=idx, style=LINE_STYLE) def create_date_input( idx: str, data: Dict[str, Any], content: Dict[str, str], defaults: Parameters ): """Returns number formgroup for given form data. Arguments: idx: The name of the varibale (html id) data: Input form kwargs. content: Localization text defaults: Parameters to infer defaults """ input_kwargs = data.copy() input_kwargs.pop("type") if not "date" in input_kwargs: input_kwargs["date"] = input_kwargs[ "initial_visible_month" ] = _get_default_values(idx, defaults) if 'style' in input_kwargs: style = {'style': input_kwargs.pop('style')} else: style = {} return FormGroup( id=f'group_{idx}', **style, children=[ Label(html_for=idx, children=content[idx], style=LABEL_STYLE), DatePickerSingle( className="form-control", day_size=32, display_format='YYYY-MM-DD', id=idx, **input_kwargs ), ] ) def create_switch_input(idx: str, data: Dict[str, Any], content: Dict[str, str]): """Returns switch for given form data. Arguments: idx: The name of the varibale (html id) data: Input form kwargs. content: Localization text defaults: Parameters to infer defaults """ return daq.BooleanSwitch( id=idx, on=False, label=content[idx] ) def _get_default_values( key: str, defaults: Parameters, min_val: Optional[float] = None, max_val: Optional[float] = None, ) -> float: """Tries to infer default values given parameters. Ensures that value is between min and max. Min defaults to zero if not given. Max will be ignored if not given. Arguments: key: The name of the varibale (html id) defaults: Parameters to infer defaults min_val: Min boundary of form max_val: Max boundary of form """ min_val = 0 if min_val is None else min_val if "rate" in key: val = ( defaults.dispositions[key.split("_")[0]].rate if key.split("_")[0] in defaults.dispositions else getattr(defaults, key, min_val) ) * 100 elif "los" in key: val = ( defaults.dispositions[key.split("_")[0]].days if key.split("_")[0] in defaults.dispositions else getattr(defaults, key, min_val) ) elif "share" in key: val = getattr(defaults, key, min_val) * 100 elif "susceptible" in key: val = defaults.region.susceptible else: val = getattr(defaults, key, min_val) return min(max_val, val) if max_val is not None else val
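# ---------------------------------------------------------------------------
# Minimal usage sketch for the template helpers above. The DataFrame contents,
# element ids and localization strings are invented for illustration; only
# functions defined in this module (plus pandas) are used.
from pandas import DataFrame

# Render every 7th row of a projection frame as a dash Table, casting floats
# through the `formats` mapping (python type -> cast callable).
census = DataFrame({"hospitalized": [float(i) * 1.37 for i in range(21)]})
census.index.name = "day"
table = df_to_html_table(census, n_mod=7,
                         formats={float: lambda v: round(v, 1)})

# Header and divider elements look up their text in a localization mapping
# keyed by the element id.
content = {"intro-header": "Hospital census projections"}  # illustrative text
header = create_header("intro-header", content)
divider = create_line_break("intro-divider")
# ---------------------------------------------------------------------------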
mit
joshloyal/scikit-learn
sklearn/linear_model/least_angle.py
15
57631
""" Least Angle Regression algorithm. See the documentation on the Generalized Linear Model for a complete discussion. """ from __future__ import print_function # Author: Fabian Pedregosa <[email protected]> # Alexandre Gramfort <[email protected]> # Gael Varoquaux # # License: BSD 3 clause from math import log import sys import warnings from distutils.version import LooseVersion import numpy as np from scipy import linalg, interpolate from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel from ..base import RegressorMixin from ..utils import arrayfuncs, as_float_array, check_X_y, deprecated from ..model_selection import check_cv from ..exceptions import ConvergenceWarning from ..externals.joblib import Parallel, delayed from ..externals.six.moves import xrange from ..externals.six import string_types import scipy solve_triangular_args = {} if LooseVersion(scipy.__version__) >= LooseVersion('0.12'): solve_triangular_args = {'check_finite': False} def lars_path(X, y, Xy=None, Gram=None, max_iter=500, alpha_min=0, method='lar', copy_X=True, eps=np.finfo(np.float).eps, copy_Gram=True, verbose=0, return_path=True, return_n_iter=False, positive=False): """Compute Least Angle Regression or Lasso path using LARS algorithm [1] The optimization objective for the case method='lasso' is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 in the case of method='lars', the objective function is only known in the form of an implicit equation (see discussion in [1]) Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ----------- X : array, shape: (n_samples, n_features) Input data. y : array, shape: (n_samples) Input targets. positive : boolean (default=False) Restrict coefficients to be >= 0. When using this option together with method 'lasso' the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha (neither will they when using method 'lar' ..). Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent lasso_path function. max_iter : integer, optional (default=500) Maximum number of iterations to perform, set to infinity for no limit. Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features. alpha_min : float, optional (default=0) Minimum correlation along the path. It corresponds to the regularization parameter alpha parameter in the Lasso. method : {'lar', 'lasso'}, optional (default='lar') Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. eps : float, optional (default=``np.finfo(np.float).eps``) The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : bool, optional (default=True) If ``False``, ``X`` is overwritten. copy_Gram : bool, optional (default=True) If ``False``, ``Gram`` is overwritten. verbose : int (default=0) Controls output verbosity. return_path : bool, optional (default=True) If ``return_path==True`` returns the entire path, else returns only the last point of the path. return_n_iter : bool, optional (default=False) Whether to return the number of iterations. 
Returns -------- alphas : array, shape: [n_alphas + 1] Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter``, ``n_features`` or the number of nodes in the path with ``alpha >= alpha_min``, whichever is smaller. active : array, shape [n_alphas] Indices of active variables at the end of the path. coefs : array, shape (n_features, n_alphas + 1) Coefficients along the path n_iter : int Number of iterations run. Returned only if return_n_iter is set to True. See also -------- lasso_path LassoLars Lars LassoLarsCV LarsCV sklearn.decomposition.sparse_encode References ---------- .. [1] "Least Angle Regression", Effron et al. http://statweb.stanford.edu/~tibs/ftp/lars.pdf .. [2] `Wikipedia entry on the Least-angle regression <https://en.wikipedia.org/wiki/Least-angle_regression>`_ .. [3] `Wikipedia entry on the Lasso <https://en.wikipedia.org/wiki/Lasso_(statistics)>`_ """ n_features = X.shape[1] n_samples = y.size max_features = min(max_iter, n_features) if return_path: coefs = np.zeros((max_features + 1, n_features)) alphas = np.zeros(max_features + 1) else: coef, prev_coef = np.zeros(n_features), np.zeros(n_features) alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas? n_iter, n_active = 0, 0 active, indices = list(), np.arange(n_features) # holds the sign of covariance sign_active = np.empty(max_features, dtype=np.int8) drop = False # will hold the cholesky factorization. Only lower part is # referenced. # We are initializing this to "zeros" and not empty, because # it is passed to scipy linalg functions and thus if it has NaNs, # even if they are in the upper part that it not used, we # get errors raised. # Once we support only scipy > 0.12 we can use check_finite=False and # go back to "empty" L = np.zeros((max_features, max_features), dtype=X.dtype) swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,)) solve_cholesky, = get_lapack_funcs(('potrs',), (X,)) if Gram is None: if copy_X: # force copy. setting the array to be fortran-ordered # speeds up the calculation of the (partial) Gram matrix # and allows to easily swap columns X = X.copy('F') elif isinstance(Gram, string_types) and Gram == 'auto': Gram = None if X.shape[0] > X.shape[1]: Gram = np.dot(X.T, X) elif copy_Gram: Gram = Gram.copy() if Xy is None: Cov = np.dot(X.T, y) else: Cov = Xy.copy() if verbose: if verbose > 1: print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") else: sys.stdout.write('.') sys.stdout.flush() tiny = np.finfo(np.float).tiny # to avoid division by 0 warning tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning equality_tolerance = np.finfo(np.float32).eps while True: if Cov.size: if positive: C_idx = np.argmax(Cov) else: C_idx = np.argmax(np.abs(Cov)) C_ = Cov[C_idx] if positive: C = C_ else: C = np.fabs(C_) else: C = 0. 
if return_path: alpha = alphas[n_iter, np.newaxis] coef = coefs[n_iter] prev_alpha = alphas[n_iter - 1, np.newaxis] prev_coef = coefs[n_iter - 1] alpha[0] = C / n_samples if alpha[0] <= alpha_min + equality_tolerance: # early stopping if abs(alpha[0] - alpha_min) > equality_tolerance: # interpolation factor 0 <= ss < 1 if n_iter > 0: # In the first iteration, all alphas are zero, the formula # below would make ss a NaN ss = ((prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0])) coef[:] = prev_coef + ss * (coef - prev_coef) alpha[0] = alpha_min if return_path: coefs[n_iter] = coef break if n_iter >= max_iter or n_active >= n_features: break if not drop: ########################################################## # Append x_j to the Cholesky factorization of (Xa * Xa') # # # # ( L 0 ) # # L -> ( ) , where L * w = Xa' x_j # # ( w z ) and z = ||x_j|| # # # ########################################################## if positive: sign_active[n_active] = np.ones_like(C_) else: sign_active[n_active] = np.sign(C_) m, n = n_active, C_idx + n_active Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) indices[n], indices[m] = indices[m], indices[n] Cov_not_shortened = Cov Cov = Cov[1:] # remove Cov[0] if Gram is None: X.T[n], X.T[m] = swap(X.T[n], X.T[m]) c = nrm2(X.T[n_active]) ** 2 L[n_active, :n_active] = \ np.dot(X.T[n_active], X.T[:n_active].T) else: # swap does only work inplace if matrix is fortran # contiguous ... Gram[m], Gram[n] = swap(Gram[m], Gram[n]) Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) c = Gram[n_active, n_active] L[n_active, :n_active] = Gram[n_active, :n_active] # Update the cholesky decomposition for the Gram matrix if n_active: linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) diag = max(np.sqrt(np.abs(c - v)), eps) L[n_active, n_active] = diag if diag < 1e-7: # The system is becoming too ill-conditioned. # We have degenerate vectors in our active set. # We'll 'drop for good' the last regressor added. # Note: this case is very rare. It is no longer triggered by # the test suite. The `equality_tolerance` margin added in 0.16 # to get early stopping to work consistently on all versions of # Python including 32 bit Python under Windows seems to make it # very difficult to trigger the 'drop for good' strategy. warnings.warn('Regressors in active set degenerate. ' 'Dropping a regressor, after %i iterations, ' 'i.e. alpha=%.3e, ' 'with an active set of %i regressors, and ' 'the smallest cholesky pivot element being %.3e' % (n_iter, alpha, n_active, diag), ConvergenceWarning) # XXX: need to figure a 'drop for good' way Cov = Cov_not_shortened Cov[0] = 0 Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) continue active.append(indices[n_active]) n_active += 1 if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '', n_active, C)) if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]: # alpha is increasing. This is because the updates of Cov are # bringing in too much numerical error that is greater than # than the remaining correlation with the # regressors. Time to bail out warnings.warn('Early stopping the lars path, as the residues ' 'are small and the current value of alpha is no ' 'longer well controlled. %i iterations, alpha=%.3e, ' 'previous alpha=%.3e, with an active set of %i ' 'regressors.' 
% (n_iter, alpha, prev_alpha, n_active), ConvergenceWarning) break # least squares solution least_squares, info = solve_cholesky(L[:n_active, :n_active], sign_active[:n_active], lower=True) if least_squares.size == 1 and least_squares == 0: # This happens because sign_active[:n_active] = 0 least_squares[...] = 1 AA = 1. else: # is this really needed ? AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active])) if not np.isfinite(AA): # L is too ill-conditioned i = 0 L_ = L[:n_active, :n_active].copy() while not np.isfinite(AA): L_.flat[::n_active + 1] += (2 ** i) * eps least_squares, info = solve_cholesky( L_, sign_active[:n_active], lower=True) tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) AA = 1. / np.sqrt(tmp) i += 1 least_squares *= AA if Gram is None: # equiangular direction of variables in the active set eq_dir = np.dot(X.T[:n_active].T, least_squares) # correlation between each unactive variables and # eqiangular vector corr_eq_dir = np.dot(X.T[n_active:], eq_dir) else: # if huge number of features, this takes 50% of time, I # think could be avoided if we just update it using an # orthogonal (QR) decomposition of X corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny)) if positive: gamma_ = min(g1, C / AA) else: g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny)) gamma_ = min(g1, g2, C / AA) # TODO: better names for these variables: z drop = False z = -coef[active] / (least_squares + tiny32) z_pos = arrayfuncs.min_pos(z) if z_pos < gamma_: # some coefficients have changed sign idx = np.where(z == z_pos)[0][::-1] # update the sign, important for LAR sign_active[idx] = -sign_active[idx] if method == 'lasso': gamma_ = z_pos drop = True n_iter += 1 if return_path: if n_iter >= coefs.shape[0]: del coef, alpha, prev_alpha, prev_coef # resize the coefs and alphas array add_features = 2 * max(1, (max_features - n_active)) coefs = np.resize(coefs, (n_iter + add_features, n_features)) coefs[-add_features:] = 0 alphas = np.resize(alphas, n_iter + add_features) alphas[-add_features:] = 0 coef = coefs[n_iter] prev_coef = coefs[n_iter - 1] alpha = alphas[n_iter, np.newaxis] prev_alpha = alphas[n_iter - 1, np.newaxis] else: # mimic the effect of incrementing n_iter on the array references prev_coef = coef prev_alpha[0] = alpha[0] coef = np.zeros_like(coef) coef[active] = prev_coef[active] + gamma_ * least_squares # update correlations Cov -= gamma_ * corr_eq_dir # See if any coefficient has changed sign if drop and method == 'lasso': # handle the case when idx is not length of 1 [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in idx] n_active -= 1 m, n = idx, n_active # handle the case when idx is not length of 1 drop_idx = [active.pop(ii) for ii in idx] if Gram is None: # propagate dropped variable for ii in idx: for i in range(ii, n_active): X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) # yeah this is stupid indices[i], indices[i + 1] = indices[i + 1], indices[i] # TODO: this could be updated residual = y - np.dot(X[:, :n_active], coef[active]) temp = np.dot(X.T[n_active], residual) Cov = np.r_[temp, Cov] else: for ii in idx: for i in range(ii, n_active): indices[i], indices[i + 1] = indices[i + 1], indices[i] Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) # Cov_n = Cov_j + x_j * X + increment(betas) TODO: # will this still work with multiple drops ? # recompute covariance. 
Probably could be done better # wrong as Xy is not swapped with the rest of variables # TODO: this could be updated residual = y - np.dot(X, coef) temp = np.dot(X.T[drop_idx], residual) Cov = np.r_[temp, Cov] sign_active = np.delete(sign_active, idx) sign_active = np.append(sign_active, 0.) # just to maintain size if verbose > 1: print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx, n_active, abs(temp))) if return_path: # resize coefs in case of early stop alphas = alphas[:n_iter + 1] coefs = coefs[:n_iter + 1] if return_n_iter: return alphas, active, coefs.T, n_iter else: return alphas, active, coefs.T else: if return_n_iter: return alpha, active, coef, n_iter else: return alpha, active, coef ############################################################################### # Estimator classes class Lars(LinearModel, RegressorMixin): """Least Angle Regression model a.k.a. LAR Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- n_nonzero_coefs : int, optional Target number of non-zero coefficients. Use ``np.inf`` for no limit. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. fit_path : boolean If True the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \ whichever is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. coef_path_ : array, shape (n_features, n_alphas + 1) \ | list of n_targets such arrays The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). 
intercept_ : float | array, shape (n_targets,) Independent term in decision function. n_iter_ : array-like or int The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.Lars(n_nonzero_coefs=1) >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True, n_nonzero_coefs=1, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] See also -------- lars_path, LarsCV sklearn.decomposition.sparse_encode """ method = 'lar' def __init__(self, fit_intercept=True, verbose=False, normalize=True, precompute='auto', n_nonzero_coefs=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True, positive=False): self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.precompute = precompute self.n_nonzero_coefs = n_nonzero_coefs self.positive = positive self.eps = eps self.copy_X = copy_X self.fit_path = fit_path def _get_gram(self): # precompute if n_samples > n_features precompute = self.precompute if hasattr(precompute, '__array__'): Gram = precompute elif precompute == 'auto': Gram = 'auto' else: Gram = None return Gram def fit(self, X, y, Xy=None): """Fit the model using X, y as training data. parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \ optional Xy = np.dot(X.T, y) that can be precomputed. It is useful only when the Gram matrix is precomputed. returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True, multi_output=True) n_features = X.shape[1] X, y, X_offset, y_offset, X_scale = self._preprocess_data(X, y, self.fit_intercept, self.normalize, self.copy_X) if y.ndim == 1: y = y[:, np.newaxis] n_targets = y.shape[1] alpha = getattr(self, 'alpha', 0.) if hasattr(self, 'n_nonzero_coefs'): alpha = 0. 
# n_nonzero_coefs parametrization takes priority max_iter = self.n_nonzero_coefs else: max_iter = self.max_iter precompute = self.precompute if not hasattr(precompute, '__array__') and ( precompute is True or (precompute == 'auto' and X.shape[0] > X.shape[1]) or (precompute == 'auto' and y.shape[1] > 1)): Gram = np.dot(X.T, X) else: Gram = self._get_gram() self.alphas_ = [] self.n_iter_ = [] self.coef_ = np.empty((n_targets, n_features)) if self.fit_path: self.active_ = [] self.coef_path_ = [] for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, active, coef_path, n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=True, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.active_.append(active) self.n_iter_.append(n_iter_) self.coef_path_.append(coef_path) self.coef_[k] = coef_path[:, -1] if n_targets == 1: self.alphas_, self.active_, self.coef_path_, self.coef_ = [ a[0] for a in (self.alphas_, self.active_, self.coef_path_, self.coef_)] self.n_iter_ = self.n_iter_[0] else: for k in xrange(n_targets): this_Xy = None if Xy is None else Xy[:, k] alphas, _, self.coef_[k], n_iter_ = lars_path( X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X, copy_Gram=True, alpha_min=alpha, method=self.method, verbose=max(0, self.verbose - 1), max_iter=max_iter, eps=self.eps, return_path=False, return_n_iter=True, positive=self.positive) self.alphas_.append(alphas) self.n_iter_.append(n_iter_) if n_targets == 1: self.alphas_ = self.alphas_[0] self.n_iter_ = self.n_iter_[0] self._set_intercept(X_offset, y_offset, X_scale) return self class LassoLars(Lars): """Lasso model fit with Least Angle Regression a.k.a. Lars It is a Linear Model trained with an L1 prior as regularizer. The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- alpha : float Constant that multiplies the penalty term. Defaults to 1.0. ``alpha = 0`` is equivalent to an ordinary least square, solved by :class:`LinearRegression`. For numerical reasons, using ``alpha = 0`` with the LassoLars object is not advised and you should prefer the LinearRegression object. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. 
However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. fit_path : boolean If ``True`` the full path is stored in the ``coef_path_`` attribute. If you compute the solution for a large problem or many targets, setting ``fit_path`` to ``False`` will lead to a speedup, especially with a small alpha. Attributes ---------- alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays Maximum of covariances (in absolute value) at each iteration. \ ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \ nodes in the path with correlation greater than ``alpha``, whichever \ is smaller. active_ : list, length = n_alphas | list of n_targets such lists Indices of active variables at the end of the path. coef_path_ : array, shape (n_features, n_alphas + 1) or list If a list is passed it's expected to be one of n_targets such arrays. The varying values of the coefficients along the path. It is not present if the ``fit_path`` parameter is ``False``. coef_ : array, shape (n_features,) or (n_targets, n_features) Parameter vector (w in the formulation formula). intercept_ : float | array, shape (n_targets,) Independent term in decision function. n_iter_ : array-like or int. The number of iterations taken by lars_path to find the grid of alphas for each target. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.LassoLars(alpha=0.01) >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True, fit_path=True, max_iter=500, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -0.963257...] 
See also -------- lars_path lasso_path Lasso LassoCV LassoLarsCV sklearn.decomposition.sparse_encode """ method = 'lasso' def __init__(self, alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, fit_path=True, positive=False): self.alpha = alpha self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.positive = positive self.precompute = precompute self.copy_X = copy_X self.eps = eps self.fit_path = fit_path ############################################################################### # Cross-validated estimator classes def _check_copy_and_writeable(array, copy=False): if copy or not array.flags.writeable: return array.copy() return array def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None, copy=True, method='lars', verbose=False, fit_intercept=True, normalize=True, max_iter=500, eps=np.finfo(np.float).eps, positive=False): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on Gram : None, 'auto', array, shape: (n_features, n_features), optional Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram matrix is precomputed from the given X, if there are more samples than features copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied; if False, they may be overwritten. method : 'lar' | 'lasso' Specifies the returned model. Select ``'lar'`` for Least Angle Regression, ``'lasso'`` for the Lasso. verbose : integer, optional Sets the amount of verbosity fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. See reservations for using this option in combination with method 'lasso' for expected small values of alpha in the doc of LassoLarsCV and LassoLarsIC. normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. max_iter : integer, optional Maximum number of iterations to perform. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Returns -------- alphas : array, shape (n_alphas,) Maximum of covariances (in absolute value) at each iteration. ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever is smaller. active : list Indices of active variables at the end of the path. 
coefs : array, shape (n_features, n_alphas) Coefficients along the path residues : array, shape (n_alphas, n_samples) Residues of the prediction on the test data """ X_train = _check_copy_and_writeable(X_train, copy) y_train = _check_copy_and_writeable(y_train, copy) X_test = _check_copy_and_writeable(X_test, copy) y_test = _check_copy_and_writeable(y_test, copy) if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] alphas, active, coefs = lars_path( X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False, method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps, positive=positive) if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] return alphas, active, coefs, residues.T class LarsCV(Lars): """Cross-validated Least Angle Regression model Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. 
Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. See also -------- lars_path, LassoLars, LassoLarsCV """ method = 'lar' def __init__(self, fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.max_iter = max_iter self.cv = cv self.max_n_alphas = max_n_alphas self.n_jobs = n_jobs super(LarsCV, self).__init__(fit_intercept=fit_intercept, verbose=verbose, normalize=normalize, precompute=precompute, n_nonzero_coefs=500, eps=eps, copy_X=copy_X, fit_path=True, positive=positive) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=self.copy_X) y = as_float_array(y, copy=self.copy_X) # init cross-validation generator cv = check_cv(self.cv, classifier=False) Gram = 'auto' if self.precompute else None cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_lars_path_residues)( X[train], y[train], X[test], y[test], Gram=Gram, copy=False, method=self.method, verbose=max(0, self.verbose - 1), normalize=self.normalize, fit_intercept=self.fit_intercept, max_iter=self.max_iter, eps=self.eps, positive=self.positive) for train, test in cv.split(X, y)) all_alphas = np.concatenate(list(zip(*cv_paths))[0]) # Unique also sorts all_alphas = np.unique(all_alphas) # Take at most max_n_alphas values stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) all_alphas = all_alphas[::stride] mse_path = np.empty((len(all_alphas), len(cv_paths))) for index, (alphas, active, coefs, residues) in enumerate(cv_paths): alphas = alphas[::-1] residues = residues[::-1] if alphas[0] != 0: alphas = np.r_[0, alphas] residues = np.r_[residues[0, np.newaxis], residues] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] residues = np.r_[residues, residues[-1, np.newaxis]] this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) this_residues **= 2 mse_path[:, index] = np.mean(this_residues, axis=-1) mask = np.all(np.isfinite(mse_path), axis=-1) all_alphas = all_alphas[mask] mse_path = mse_path[mask] # Select the alpha that minimizes left-out error i_best_alpha = np.argmin(mse_path.mean(axis=-1)) best_alpha = all_alphas[i_best_alpha] # Store our parameters self.alpha_ = best_alpha self.cv_alphas_ = all_alphas self.mse_path_ = mse_path # Now compute the full model # it will call a lasso internally when self if LassoLarsCV # as self.method == 'lasso' Lars.fit(self, X, y) return self @property def alpha(self): # impedance matching for the above Lars.fit (should not be documented) return 
self.alpha_ @property @deprecated("Attribute cv_mse_path_ is deprecated in 0.18 and " "will be removed in 0.20. Use 'mse_path_' instead") def cv_mse_path_(self): return self.mse_path_ class LassoLarsCV(LarsCV): """Cross-validated Lasso, using the LARS algorithm The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsCV only makes sense for problems where a sparse solution is expected and/or reached. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. max_n_alphas : integer, optional The maximum number of points on the path used to compute the residuals in the cross-validation n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. 
coef_path_ : array, shape (n_features, n_alphas) the varying values of the coefficients along the path alpha_ : float the estimated regularization parameter alpha alphas_ : array, shape (n_alphas,) the different values of alpha along the path cv_alphas_ : array, shape (n_cv_alphas,) all the values of alpha along the path for the different folds mse_path_ : array, shape (n_folds, n_cv_alphas) the mean square error on left-out for each fold along the path (alpha values given by ``cv_alphas``) n_iter_ : array-like or int the number of iterations run by Lars with the optimal alpha. Notes ----- The object solves the same problem as the LassoCV object. However, unlike the LassoCV, it find the relevant alphas values by itself. In general, because of this property, it will be more stable. However, it is more fragile to heavily multicollinear datasets. It is more efficient than the LassoCV if only a small number of features are selected compared to the total number, for instance if there are very few samples compared to the number of features. See also -------- lars_path, LassoLars, LarsCV, LassoCV """ method = 'lasso' class LassoLarsIC(LassoLars): """Lasso model fit with Lars using BIC or AIC for model selection The optimization objective for Lasso is:: (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 AIC is the Akaike information criterion and BIC is the Bayes Information criterion. Such criteria are useful to select the value of the regularization parameter by making a trade-off between the goodness of fit and the complexity of the model. A good model should explain well the data while being simple. Read more in the :ref:`User Guide <least_angle_regression>`. Parameters ---------- criterion : 'bic' | 'aic' The type of criterion to use. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). positive : boolean (default=False) Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients do not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator. As a consequence using LassoLarsIC only makes sense for problems where a sparse solution is expected and/or reached. verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. precompute : True | False | 'auto' | array-like Whether to use a precomputed Gram matrix to speed up calculations. If set to ``'auto'`` let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform. 
Can be used for early stopping. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the ``tol`` parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. Attributes ---------- coef_ : array, shape (n_features,) parameter vector (w in the formulation formula) intercept_ : float independent term in decision function. alpha_ : float the alpha parameter chosen by the information criterion n_iter_ : int number of iterations run by lars_path to find the grid of alphas. criterion_ : array, shape (n_alphas,) The value of the information criteria ('aic', 'bic') across all alphas. The alpha which has the smallest information criteria is chosen. Examples -------- >>> from sklearn import linear_model >>> clf = linear_model.LassoLarsIC(criterion='bic') >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True, max_iter=500, normalize=True, positive=False, precompute='auto', verbose=False) >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE [ 0. -1.11...] Notes ----- The estimation of the number of degrees of freedom is given by: "On the degrees of freedom of the lasso" Hui Zou, Trevor Hastie, and Robert Tibshirani Ann. Statist. Volume 35, Number 5 (2007), 2173-2192. https://en.wikipedia.org/wiki/Akaike_information_criterion https://en.wikipedia.org/wiki/Bayesian_information_criterion See also -------- lars_path, LassoLars, LassoLarsCV """ def __init__(self, criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, copy_X=True, positive=False): self.criterion = criterion self.fit_intercept = fit_intercept self.positive = positive self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.copy_X = copy_X self.precompute = precompute self.eps = eps self.fit_path = True def fit(self, X, y, copy_X=True): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) training data. y : array-like, shape (n_samples,) target values. copy_X : boolean, optional, default True If ``True``, X will be copied; else, it may be overwritten. Returns ------- self : object returns an instance of self. 
""" X, y = check_X_y(X, y, y_numeric=True) X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data( X, y, self.fit_intercept, self.normalize, self.copy_X) max_iter = self.max_iter Gram = self._get_gram() alphas_, active_, coef_path_, self.n_iter_ = lars_path( X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0, method='lasso', verbose=self.verbose, max_iter=max_iter, eps=self.eps, return_n_iter=True, positive=self.positive) n_samples = X.shape[0] if self.criterion == 'aic': K = 2 # AIC elif self.criterion == 'bic': K = log(n_samples) # BIC else: raise ValueError('criterion should be either bic or aic') R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals mean_squared_error = np.mean(R ** 2, axis=0) df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom for k, coef in enumerate(coef_path_.T): mask = np.abs(coef) > np.finfo(coef.dtype).eps if not np.any(mask): continue # get the number of degrees of freedom equal to: # Xc = X[:, mask] # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs df[k] = np.sum(mask) self.alphas_ = alphas_ with np.errstate(divide='ignore'): self.criterion_ = n_samples * np.log(mean_squared_error) + K * df n_best = np.argmin(self.criterion_) self.alpha_ = alphas_[n_best] self.coef_ = coef_path_[:, n_best] self._set_intercept(Xmean, ymean, Xstd) return self
bsd-3-clause
pravsripad/mne-python
examples/preprocessing/plot_shift_evoked.py
29
1245
""" ================================== Shifting time-scale in evoked data ================================== """ # Author: Mainak Jas <[email protected]> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne.viz import tight_layout from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname = data_path + '/MEG/sample/sample_audvis-ave.fif' # Reading evoked data condition = 'Left Auditory' evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0), proj=True) ch_names = evoked.info['ch_names'] picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"]) # Create subplots f, (ax1, ax2, ax3) = plt.subplots(3) evoked.plot(exclude=[], picks=picks, axes=ax1, titles=dict(grad='Before time shifting'), time_unit='s') # Apply relative time-shift of 500 ms evoked.shift_time(0.5, relative=True) evoked.plot(exclude=[], picks=picks, axes=ax2, titles=dict(grad='Relative shift: 500 ms'), time_unit='s') # Apply absolute time-shift of 500 ms evoked.shift_time(0.5, relative=False) evoked.plot(exclude=[], picks=picks, axes=ax3, titles=dict(grad='Absolute shift: 500 ms'), time_unit='s') tight_layout()
bsd-3-clause
MatthieuBizien/scikit-learn
examples/linear_model/plot_lasso_model_selection.py
311
5431
""" =================================================== Lasso model selection: Cross-Validation / AIC / BIC =================================================== Use the Akaike information criterion (AIC), the Bayes Information criterion (BIC) and cross-validation to select an optimal value of the regularization parameter alpha of the :ref:`lasso` estimator. Results obtained with LassoLarsIC are based on AIC/BIC criteria. Information-criterion based model selection is very fast, but it relies on a proper estimation of degrees of freedom, are derived for large samples (asymptotic results) and assume the model is correct, i.e. that the data are actually generated by this model. They also tend to break when the problem is badly conditioned (more features than samples). For cross-validation, we use 20-fold with 2 algorithms to compute the Lasso path: coordinate descent, as implemented by the LassoCV class, and Lars (least angle regression) as implemented by the LassoLarsCV class. Both algorithms give roughly the same results. They differ with regards to their execution speed and sources of numerical errors. Lars computes a path solution only for each kink in the path. As a result, it is very efficient when there are only of few kinks, which is the case if there are few features or samples. Also, it is able to compute the full path without setting any meta parameter. On the opposite, coordinate descent compute the path points on a pre-specified grid (here we use the default). Thus it is more efficient if the number of grid points is smaller than the number of kinks in the path. Such a strategy can be interesting if the number of features is really large and there are enough samples to select a large amount. In terms of numerical errors, for heavily correlated variables, Lars will accumulate more errors, while the coordinate descent algorithm will only sample the path on a grid. Note how the optimal value of alpha varies for each fold. This illustrates why nested-cross validation is necessary when trying to evaluate the performance of a method for which a parameter is chosen by cross-validation: this choice of parameter may not be optimal for unseen data. 
""" print(__doc__) # Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort # License: BSD 3 clause import time import numpy as np import matplotlib.pyplot as plt from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC from sklearn import datasets diabetes = datasets.load_diabetes() X = diabetes.data y = diabetes.target rng = np.random.RandomState(42) X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features # normalize data as done by Lars to allow for comparison X /= np.sqrt(np.sum(X ** 2, axis=0)) ############################################################################## # LassoLarsIC: least angle regression with BIC/AIC criterion model_bic = LassoLarsIC(criterion='bic') t1 = time.time() model_bic.fit(X, y) t_bic = time.time() - t1 alpha_bic_ = model_bic.alpha_ model_aic = LassoLarsIC(criterion='aic') model_aic.fit(X, y) alpha_aic_ = model_aic.alpha_ def plot_ic_criterion(model, name, color): alpha_ = model.alpha_ alphas_ = model.alphas_ criterion_ = model.criterion_ plt.plot(-np.log10(alphas_), criterion_, '--', color=color, linewidth=3, label='%s criterion' % name) plt.axvline(-np.log10(alpha_), color=color, linewidth=3, label='alpha: %s estimate' % name) plt.xlabel('-log(alpha)') plt.ylabel('criterion') plt.figure() plot_ic_criterion(model_aic, 'AIC', 'b') plot_ic_criterion(model_bic, 'BIC', 'r') plt.legend() plt.title('Information-criterion for model selection (training time %.3fs)' % t_bic) ############################################################################## # LassoCV: coordinate descent # Compute paths print("Computing regularization path using the coordinate descent lasso...") t1 = time.time() model = LassoCV(cv=20).fit(X, y) t_lasso_cv = time.time() - t1 # Display results m_log_alphas = -np.log10(model.alphas_) plt.figure() ymin, ymax = 2300, 3800 plt.plot(m_log_alphas, model.mse_path_, ':') plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k', label='Average across the folds', linewidth=2) plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k', label='alpha: CV estimate') plt.legend() plt.xlabel('-log(alpha)') plt.ylabel('Mean square error') plt.title('Mean square error on each fold: coordinate descent ' '(train time: %.2fs)' % t_lasso_cv) plt.axis('tight') plt.ylim(ymin, ymax) ############################################################################## # LassoLarsCV: least angle regression # Compute paths print("Computing regularization path using the Lars lasso...") t1 = time.time() model = LassoLarsCV(cv=20).fit(X, y) t_lasso_lars_cv = time.time() - t1 # Display results m_log_alphas = -np.log10(model.cv_alphas_) plt.figure() plt.plot(m_log_alphas, model.cv_mse_path_, ':') plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k', label='Average across the folds', linewidth=2) plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k', label='alpha CV') plt.legend() plt.xlabel('-log(alpha)') plt.ylabel('Mean square error') plt.title('Mean square error on each fold: Lars (train time: %.2fs)' % t_lasso_lars_cv) plt.axis('tight') plt.ylim(ymin, ymax) plt.show()
bsd-3-clause
nismod/energy_demand
energy_demand/plotting/_test.py
1
8956
from smif.data_layer import Results import matplotlib.pyplot as plt import os import seaborn as sbn import geopandas as gpd import pandas as pd '''path = "C:/Users/cenv0553/nismod2/results/electricworld/energy_demand_constrained/decision_0" a = os.listdir(path) for i in a: print(i)''' path = 'C:/Users/cenv0553/nismod2' out_path = "//ouce-file1.ouce.ox.ac.uk/Users/staff/cenv0553/Desktop/arc_results" ''' results = Results({'interface': 'local_csv', 'dir': path}) df = results.read(['energy_sd_optimised'], ['energy_demand_constrained'], ['service_gas']).drop(columns='decision') agg = df.groupby(by=['model_run', 'timestep', 'lad_uk_2016'], squeeze=True, sort=False).sum().drop(columns='hourly') agg = agg.reset_index() plot_lads(agg, 'service_gas')''' results = Results({'interface': 'local_csv','dir': path}) print("--------") model_runs = results.list_model_runs() for i in model_runs: print(i) filenames = [ 'industry_biomass_boiler_biomass', 'industry_biomass_district_heating_biomass', 'industry_biomass_non_heating', 'industry_electricity_boiler_electricity', 'industry_electricity_district_heating_electricity', 'industry_electricity_heat_pumps_electricity', 'industry_electricity_non_heating', 'industry_gas_boiler_gas', 'industry_gas_district_heating_CHP_gas', 'industry_gas_non_heating', 'industry_hydrogen_boiler_hydrogen', 'industry_hydrogen_district_heating_fuel_cell', 'industry_hydrogen_fuel_cell_hydrogen', 'industry_hydrogen_heat_pumps_hydrogen', 'industry_hydrogen_non_heating', 'industry_oil_boiler_oil', 'industry_oil_non_heating', 'industry_solid_fuel_boiler_solid_fuel', 'industry_solid_fuel_non_heating', 'residential_biomass_boiler_biomass', 'residential_biomass_district_heating_biomass', 'residential_biomass_non_heating', 'residential_electricity_boiler_electricity', 'residential_electricity_district_heating_electricity', 'residential_electricity_heat_pumps_electricity', 'residential_electricity_non_heating', 'residential_gas_boiler_gas', 'residential_gas_district_heating_CHP_gas', 'residential_gas_non_heating', 'residential_hydrogen_boiler_hydrogen', 'residential_hydrogen_district_heating_fuel_cell', 'residential_hydrogen_fuel_cell_hydrogen', 'residential_hydrogen_heat_pumps_hydrogen', 'residential_hydrogen_non_heating', 'residential_oil_boiler_oil', 'residential_oil_non_heating', 'residential_solid_fuel_boiler_solid_fuel', 'residential_solid_fuel_non_heating', 'service_biomass_boiler_biomass', 'service_biomass_district_heating_biomass', 'service_biomass_non_heating', 'service_electricity_boiler_electricity', 'service_electricity_district_heating_electricity', 'service_electricity_heat_pumps_electricity', 'service_electricity_non_heating', 'service_gas_boiler_gas', 'service_gas_district_heating_CHP_gas', 'service_gas_non_heating', 'service_hydrogen_boiler_hydrogen', 'service_hydrogen_district_heating_fuel_cell', 'service_hydrogen_fuel_cell_hydrogen', 'service_hydrogen_heat_pumps_hydrogen', 'service_hydrogen_non_heating', 'service_oil_boiler_oil', 'service_oil_non_heating', 'service_solid_fuel_boiler_solid_fuel', 'service_solid_fuel_non_heating', 'service_solid_fuel_non_heating' ] filenames_electricity = [ 'industry_electricity_boiler_electricity', 'industry_electricity_district_heating_electricity', 'industry_electricity_heat_pumps_electricity', 'industry_electricity_non_heating', 'residential_electricity_boiler_electricity', 'residential_electricity_district_heating_electricity', 'residential_electricity_heat_pumps_electricity', 'residential_electricity_non_heating', 
'service_electricity_boiler_electricity', 'service_electricity_district_heating_electricity', 'service_electricity_heat_pumps_electricity', 'service_electricity_non_heating', ] # ['model_run', 'timestep', 'decision', 'lad_uk_2016', 'hourly', filenames...] filenames = filenames_electricity #filenames_electricity #filenames = ['industry_electricity_boiler_electricity','industry_electricity_district_heating_electricity'] model_runs = ['electricworld', 'multivector'] model_runs = ['energy_demand_constrained'] timesteps = [2015, 2020, 2030, 2050] select_arc_regions = False print("... reading in data ", flush=True) df = results.read( model_run_names=model_runs, model_names=['energy_demand_constrained'], output_names=filenames, timesteps=timesteps) print("... finished reading in data ", flush=True) df = df.drop(columns=['decision']) # Select regions if select_arc_regions: arc_regions = [ "E07000008", "E07000012", "E07000009", "E07000011", "E07000099", "E07000242", "E07000243", "E07000178", "E07000180", "E07000179", "E07000181", "E07000177", "E06000030", "E06000042", "E06000055", "E07000004", "E06000032", "E06000056", "E07000154", "E07000151", "E07000156", "E07000155"] df = df.loc[df['lad_uk_2016'].isin(arc_regions)] df_grouped = df.groupby( by=['model_run', 'lad_uk_2016', 'timestep', 'hourly'], as_index=False).sum() print("-----------grouped-------------") print(type(df_grouped)) print(df.columns.tolist()) print(df.sample(n=3).values) df_annual_sum = pd.DataFrame(columns=model_runs, index=timesteps) df_peak = pd.DataFrame(columns=model_runs, index=timesteps) regional_dict = {} regions = list(set(df_grouped['lad_uk_2016'].values.tolist())) for model_run in model_runs: print("... {}".format(model_run), flush=True) df_regions = pd.DataFrame(columns=regions, index=timesteps) for timestep in timesteps: print("... {}".format(timestep), flush=True) # Select data annual_all_reg_all_hourly = df_grouped[( df_grouped['model_run'] == model_run) & ( df_grouped['timestep'] == timestep)] annual_all_reg_all_hourly = annual_all_reg_all_hourly.drop(columns=['model_run', 'timestep']) annual_tot_sum = annual_all_reg_all_hourly.groupby(by='lad_uk_2016', as_index=False).sum() annual_hourly = annual_all_reg_all_hourly.groupby(by='hourly', as_index=False).sum() #annual_regions = annual_all_reg_all_hourly.groupby(by='lad_uk_2016', as_index=False).sum() annual_tot_sum = annual_tot_sum.drop(columns=['lad_uk_2016', 'hourly']) annual_hourly = annual_hourly.drop(columns=['hourly']) #annual_regions = annual_regions.drop(columns=['hourly']) # National total annual_tot_sum = annual_tot_sum.sum(axis=1).sum().sum() df_annual_sum[model_run][timestep] = annual_tot_sum / 1000 #GW to TW # Peak total df_peak[model_run][timestep] = annual_hourly.sum(axis=1).max() # Regional #annual_regions = annual_regions.set_index('lad_uk_2016') #annual_regions = annual_regions.T #df_year = pd.DataFrame(annual_regions.tolist(), columns=regions) #df_regions.append(df_year) #regional_dict[model_run] = df_regions # save figures df_annual_sum.plot() plt.savefig(os.path.join(out_path, "total_annual_sum.pdf")) plt.close() df_peak.plot() plt.savefig(os.path.join(out_path, "peak_demand.pdf")) plt.close() #for key, df_values in df_regions.items(): # df_values.plot() #plt.savefig(os.path.join(out_path, "regions.pdf")) #plt.close() print("... 
finished plotting") raise Exception("tt") for model_run in model_runs: df_modelrun = df[df['model_run'] == model_run] df_modelrun = df_modelrun.drop(columns='model_run') for timestep in timesteps: df_modelrun_year = df_modelrun[df_modelrun['timestep'] == timestep] df_modelrun = df_modelrun.drop(columns='timestep') df_modelrun_year = df_modelrun_year.groupby(by=['lad_uk_2016', 'hourly'], as_index=False).sum() df_modelrun_year = df_modelrun_year.drop(columns=['lad_uk_2016', 'hourly']) # Sum demand across timesteps and regions df_modelrun_year = df_modelrun_year.sum() print("...") print(df_modelrun_year) df_empty[model_run][timestep] = df_modelrun_year #'model_run', 'timestep', 'seasonal_week df = df.groupby(by=['model_run', 'lad_uk_2016', 'timestep', 'hourly']).sum() print(" ") print("A ==================") print(df.columns.tolist()) print(df.shape) print(df.sample(n=3)) print("Sum annual across all filenames") df = df.sum(axis=1).to_frame() print(" ") print("B ==================") print(df.shape) print(df.sample(n=3)) # Sum across all regions df = df.sum().to_frame() print("___3_______") print(df.shape) print(df) # Sum all filenames #df = df.sum(axis=1) df.plot() #print(df.columns.tolist()) #print(df['residential_electricity_non_heating'].shape) #df = df.reset_index() #df['residential_electricity_non_heating'].plot() #df.plot_lads('residential_electricity_non_heating') plt.show()
mit
badlogicmanpreet/nupic
examples/opf/tools/MirrorImageViz/mirrorImageViz.py
50
7221
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- # Author: Surabhi Gupta import sys import numpy as np import matplotlib.pylab as pyl def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset): '''Mirror Image Visualization: Shows the encoding space juxtaposed against the coincidence space. The encoding space is the bottom-up sensory encoding and the coincidence space depicts the corresponding activation of coincidences in the SP. Hence, the mirror image visualization is a visual depiction of the mapping of SP cells to the input representations. Note: * The files spBUOut and sensorBUOut are assumed to be in the output format used for LPF experiment outputs. * BU outputs for some sample datasets are provided. Specify the name of the dataset as an option while running this script. ''' lines = activeCoincsFile.readlines() inputs = encodingsFile.readlines() w = len(inputs[0].split(' '))-1 patterns = set([]) encodings = set([]) coincs = [] #The set of all coincidences that have won at least once reUsedCoincs = [] firstLine = inputs[0].split(' ') size = int(firstLine.pop(0)) spOutput = np.zeros((len(lines),40)) inputBits = np.zeros((len(lines),w)) print 'Total n:', size print 'Total number of records in the file:', len(lines), '\n' print 'w:', w count = 0 for x in xrange(len(lines)): inputSpace = [] #Encoded representation for each input spBUout = [int(z) for z in lines[x].split(' ')] spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP temp = set(spBUout) spOutput[x]=spBUout input = [int(z) for z in inputs[x].split(' ')] input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space tempInput = set(input) inputBits[x]=input #Creating the encoding space for m in xrange(size): if m in tempInput: inputSpace.append(m) else: inputSpace.append('|') #A non-active bit repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active #Dividing the coincidences into two difference categories. 
if len(reUsed)==0: coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary) else: reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput)) patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once encodings = encodings.union(tempInput) count +=1 overlap = {} overlapVal = 0 seen = [] seen = (printOverlaps(coincs, coincs, seen)) print len(seen), 'sets of 40 cells' seen = printOverlaps(reUsedCoincs, coincs, seen) Summ=[] for z in coincs: c=0 for y in reUsedCoincs: c += len(z[1].intersection(y[1])) Summ.append(c) print 'Sum: ', Summ for m in xrange(3): displayLimit = min(51, len(spOutput[m*200:])) if displayLimit>0: drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1) else: print 'No more records to display' pyl.show() def drawFile(dataset, matrix, patterns, cells, w, fnum): '''The similarity of two patterns in the bit-encoding space is displayed alongside their similarity in the sp-coinc space.''' score=0 count = 0 assert len(patterns)==len(cells) for p in xrange(len(patterns)-1): matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]] matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]] score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:]))) count += len(matrix[p+1:,p]) print 'Score', score/count fig = pyl.figure(figsize = (10,10), num = fnum) pyl.matshow(matrix, fignum = fnum) pyl.colorbar() pyl.title('Coincidence Space', verticalalignment='top', fontsize=12) pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17) pyl.ylabel('Encoding space', fontsize=12) def printOverlaps(comparedTo, coincs, seen): """ Compare the results and return True if success, False if failure Parameters: -------------------------------------------------------------------- coincs: Which cells are we comparing? comparedTo: The set of 40 cells we being compared to (they have no overlap with seen) seen: Which of the cells we are comparing to have already been encountered. This helps glue together the unique and reused coincs """ inputOverlap = 0 cellOverlap = 0 for y in comparedTo: closestInputs = [] closestCells = [] if len(seen)>0: inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))]) cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))]) for m in xrange( len(seen) ): if len(seen[m][1].intersection(y[4]))==inputOverlap: closestInputs.append(seen[m][2]) if len(seen[m][0].intersection(y[1]))==cellOverlap: closestCells.append(seen[m][2]) seen.append((y[1], y[4], y[0])) print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \ 'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells) return seen if __name__=='__main__': if len(sys.argv)<2: #Use basil if no dataset specified print ('Input files required. 
Read documentation for details.') else: dataset = sys.argv[1] activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt' encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt' activeCoincsFile=open(activeCoincsPath, 'r') encodingsFile=open(encodingsPath, 'r') analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
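# A small worked sketch (toy patterns, runs under Python 2 or 3, separate from the
# NuPIC I/O above) of the pairwise-overlap measure that drawFile computes:
# percent overlap of active bits between two binary patterns of width w.
def overlap_percent(a, b, w):
    return len(set(a) & set(b)) * 100 // w

toy_patterns = [{1, 2, 3, 4}, {2, 3, 4, 5}, {10, 11, 12, 13}]
toy_w = 4
for i, p in enumerate(toy_patterns):
    for q in toy_patterns[i + 1:]:
        print('%s vs %s -> %d%% overlap' % (sorted(p), sorted(q),
                                            overlap_percent(p, q, toy_w)))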
agpl-3.0
georgyberdyshev/ascend
models/steam/stab.py
1
3034
# this is a script that computes the stability of the DAE system of equations
# by using the sparse matrix routines in scipy and plotting with matplotlib.
#
# you could get fancy and produce a root locus using this technique...

import sys
import ascpy

L = ascpy.Library()
L.load('steam/dsgsat3.a4c')
T = L.findType('dsgsat3')
M = T.getSimulation('sim',False)
M.run(T.getMethod('on_load'))
print "STEADY-STATE SOLUTION..."
M.solve(ascpy.Solver('QRSlv'),ascpy.SolverReporter())
M.run(T.getMethod('configure_dynamic'))
M.solve(ascpy.Solver('QRSlv'),ascpy.SolverReporter())
M.run(T.getMethod('free_states'))

# here is the perturbation...
print "CREATING PERTURBATION..."
M.qdot_s.setRealValueWithUnits(6000,"W/m")

# IDA has its own initial conditions solver, so no need to call QRSlv here
I = ascpy.Integrator(M)
I.setEngine('IDA')
I.setParameter('linsolver','DENSE')
I.setParameter('safeeval',True)
I.setParameter('rtol',1e-4)
I.setParameter('atolvect',False)
I.setParameter('atol',1e-4)
I.setParameter('maxord',2)
I.setParameter('calcic','YA_YDP')
I.setInitialSubStep(0.001)
I.setReporter(ascpy.IntegratorReporterConsole(I))
I.setLogTimesteps(ascpy.Units("s"), 0.001, 0.002, 10)
I.analyse()

# write the DAE Jacobian blocks to Matrix Market files
F = file('gz.mm','w')
I.writeMatrix(F,'dg/dz')
F = file('gx.mm','w')
I.writeMatrix(F,'dg/dx')
F = file('fz.mm','w')
I.writeMatrix(F,'df/dz')
F = file('fx.mm','w')
I.writeMatrix(F,'df/dx')
F = file('fxp.mm','w')
I.writeMatrix(F,"df/dx'")
#I.solve()

from scipy import io
from scipy import linalg

gz = io.mmread('gz.mm')
gx = io.mmread('gx.mm')
fz = io.mmread('fz.mm')
fx = io.mmread('fx.mm')
fxp = io.mmread('fxp.mm')

print "gz", gz.shape
print "gx", gx.shape
print "fz", fz.shape
print "fx", fx.shape
print "fxp", fxp.shape

#import pylab

# dg/dy_a
#pylab.spy2(ga.todense())
#pylab.title("${dg}/{dy_a}$")
#pylab.show()

invgz = linalg.inv(gz.todense())

#pylab.figure()
#pylab.spy(invgz)
#pylab.title("$({dg}/{dy_d})^{-1}$")
#pylab.show()

# dg/dy_d
#pylab.figure()
#pylab.spy2(gd.todense())
#pylab.title("${dg}/{dy_d}$")
#pylab.show()

# df/dyd'
#pylab.figure()
#pylab.spy2(fdp.todense())
#pylab.title("${df}/{d\dot{y}_d}$")
#pylab.show()

invfxp = linalg.inv(fxp.todense())

#pylab.spy2(invfdp)
#pylab.title("$({df}/{dy_dp})^{-1}$")
#pylab.show()

dya_dyd = invgz * gx
print "gz^-1 gx", dya_dyd.shape

#pylab.spy2(dya_dyd.todense())
#pylab.title("${dy_a}/{dy_d}$")
#pylab.show()

B = fz * invgz * gx
print "fz gz^-1 gx", B.shape

#pylab.spy2(fad.todense())
#pylab.title("${df}/{dy_a} * {dy_a}/{dy_d}$")
#pylab.show()

C = fx + B
D = - invfxp * C

e,v = linalg.eig(D.todense())
#print e
print "ROOT RANGE-----------"
print "max re(e)",max(e.real)
print "min re(e)",min(e.real)
print "max im(e)",max(e.imag)
print "min im(e)",min(e.imag)
sys.stdout.flush()

#I.solve()

import pylab
sys.stderr.write("about to plot...")
pylab.plot(e.real,e.imag,'rx')
pylab.xlabel('Real axis')
pylab.ylabel('Imaginary axis')
pylab.show()
sys.stderr.write("DONE\n")

I.setLogTimesteps(ascpy.Units("s"), 0.0005, 3600, 10)
I.setParameter('calcic','Y')
I.solve()
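# A toy sketch (small dense matrices, no ASCEND required) of the eigenvalue test
# performed above: eliminate the algebraic variables via (dg/dz)^-1 dg/dx, form
# D = -(df/dx')^-1 (df/dx + df/dz (dg/dz)^-1 dg/dx) as in the script, and call the
# steady state stable when every eigenvalue of D has negative real part.  The
# matrix values below are invented for illustration only.
import numpy as np
from scipy import linalg

fxp_toy = np.array([[1.0, 0.0], [0.0, 2.0]])    # df/dx'
fx_toy = np.array([[3.0, -1.0], [-0.5, 4.0]])   # df/dx
fz_toy = np.array([[0.2], [0.1]])               # df/dz
gz_toy = np.array([[1.0]])                      # dg/dz
gx_toy = np.array([[0.3, -0.2]])                # dg/dx

D_toy = -linalg.inv(fxp_toy).dot(fx_toy + fz_toy.dot(linalg.inv(gz_toy)).dot(gx_toy))
e_toy = linalg.eigvals(D_toy)
print("max re(e_toy) = %g (%s)" % (e_toy.real.max(),
      "stable" if e_toy.real.max() < 0 else "unstable"))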
gpl-2.0
gotm-model/code
scripts/python/plot_airsea.py
2
1051
#!/usr/bin/env python # Documentation of 'plotfile' see - # http://matplotlib.org/api/pyplot_api.html#module-matplotlib.pyplot # This script is used to plot the data generated by - 'test_airsea' from pylab import plotfile, show, gca, savefig fname="fort.100" plotfile(fname, (0, 1, 2, 3, 4), delimiter=' ',subplots=False) gca().set_xlabel(r'hour') gca().set_title(r'Solar zenith angle (1 day)') savefig('solar_zenith_angle_day.png') show() fname="fort.101" plotfile(fname, (0, 1, 2, 3, 4), delimiter=' ',subplots=False) gca().set_xlabel(r'hour') gca().set_title(r'Short wave radiation (1 day)') savefig('short_wave_radiation_day.png') show() fname="fort.200" plotfile(fname, (0, 1, 2, 3, 4), delimiter=' ',subplots=False) gca().set_xlabel(r'day') gca().set_title(r'Solar zenith angle (1 year)') savefig('solar_zenith_angle_year.png') show() fname="fort.201" plotfile(fname, (0, 1, 2, 3, 4), delimiter=' ',subplots=False) gca().set_xlabel(r'day') gca().set_title(r'Short wave radiation (1 year)') savefig('short_wave_radiation_year.png') show()
gpl-2.0
gef756/statsmodels
statsmodels/sandbox/examples/ex_random_panel.py
34
6012
# -*- coding: utf-8 -*- """ Created on Fri May 18 13:05:47 2012 Author: Josef Perktold moved example from main of random_panel """ import numpy as np from statsmodels.sandbox.panel.panel_short import ShortPanelGLS, ShortPanelGLS2 from statsmodels.sandbox.panel.random_panel import PanelSample import statsmodels.sandbox.panel.correlation_structures as cs import statsmodels.stats.sandwich_covariance as sw #from statsmodels.stats.sandwich_covariance import ( # S_hac_groupsum, weights_bartlett, _HCCM2) from statsmodels.stats.moment_helpers import cov2corr, se_cov cov_nw_panel2 = sw.cov_nw_groupsum examples = ['ex1'] if 'ex1' in examples: nobs = 100 nobs_i = 5 n_groups = nobs // nobs_i k_vars = 3 # dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_equi, # corr_args=(0.6,)) # dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_ar, # corr_args=([1, -0.95],)) dgp = PanelSample(nobs, k_vars, n_groups, corr_structure=cs.corr_arma, corr_args=([1], [1., -0.9],), seed=377769) print('seed', dgp.seed) y = dgp.generate_panel() noise = y - dgp.y_true print(np.corrcoef(y.reshape(-1,n_groups, order='F'))) print(np.corrcoef(noise.reshape(-1,n_groups, order='F'))) mod = ShortPanelGLS2(y, dgp.exog, dgp.groups) res = mod.fit() print(res.params) print(res.bse) #Now what? #res.resid is of transformed model #np.corrcoef(res.resid.reshape(-1,n_groups, order='F')) y_pred = np.dot(mod.exog, res.params) resid = y - y_pred print(np.corrcoef(resid.reshape(-1,n_groups, order='F'))) print(resid.std()) err = y_pred - dgp.y_true print(err.std()) #OLS standard errors are too small mod.res_pooled.params mod.res_pooled.bse #heteroscedasticity robust doesn't help mod.res_pooled.HC1_se #compare with cluster robust se print(sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int)))) #not bad, pretty close to panel estimator #and with Newey-West Hac print(sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx))) #too small, assuming no bugs, #see Peterson assuming it refers to same kind of model print(dgp.cov) mod2 = ShortPanelGLS(y, dgp.exog, dgp.groups) res2 = mod2.fit_iterative(2) print(res2.params) print(res2.bse) #both implementations produce the same results: from numpy.testing import assert_almost_equal assert_almost_equal(res.params, res2.params, decimal=12) assert_almost_equal(res.bse, res2.bse, decimal=13) mod5 = ShortPanelGLS(y, dgp.exog, dgp.groups) res5 = mod5.fit_iterative(5) print(res5.params) print(res5.bse) #fitting once is the same as OLS #note: I need to create new instance, otherwise it continuous fitting mod1 = ShortPanelGLS(y, dgp.exog, dgp.groups) res1 = mod1.fit_iterative(1) res_ols = mod1._fit_ols() assert_almost_equal(res1.params, res_ols.params, decimal=12) assert_almost_equal(res1.bse, res_ols.bse, decimal=13) #cov_hac_panel with uniform_kernel is the same as cov_cluster for balanced #panel with full length kernel #I fixe default correction to be equal mod2._fit_ols() cov_clu = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int)) clubse = se_cov(cov_clu) cov_uni = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx, weights_func=sw.weights_uniform, use_correction='cluster') assert_almost_equal(cov_uni, cov_clu, decimal=7) #without correction cov_clu2 = sw.cov_cluster(mod2.res_pooled, dgp.groups.astype(int), use_correction=False) cov_uni2 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx, weights_func=sw.weights_uniform, use_correction=False) assert_almost_equal(cov_uni2, cov_clu2, decimal=8) cov_white = sw.cov_white_simple(mod2.res_pooled) 
cov_pnw0 = sw.cov_nw_panel(mod2.res_pooled, 0, mod2.group.groupidx, use_correction='hac') assert_almost_equal(cov_pnw0, cov_white, decimal=13) time = np.tile(np.arange(nobs_i), n_groups) #time = mod2.group.group_int cov_pnw1 = sw.cov_nw_panel(mod2.res_pooled, 4, mod2.group.groupidx) cov_pnw2 = cov_nw_panel2(mod2.res_pooled, 4, time) #s = sw.group_sums(x, time) c2, ct, cg = sw.cov_cluster_2groups(mod2.res_pooled, time, dgp.groups.astype(int), use_correction=False) ct_nw0 = cov_nw_panel2(mod2.res_pooled, 0, time, weights_func=sw.weights_uniform, use_correction=False) cg_nw0 = cov_nw_panel2(mod2.res_pooled, 0, dgp.groups.astype(int), weights_func=sw.weights_uniform, use_correction=False) assert_almost_equal(ct_nw0, ct, decimal=13) assert_almost_equal(cg_nw0, cg, decimal=13) #pnw2 0 lags assert_almost_equal(cov_clu2, cg, decimal=13) assert_almost_equal(cov_uni2, cg, decimal=8) #pnw all lags import pandas as pa #pandas.DataFrame doesn't do inplace append se = pa.DataFrame(res_ols.bse[None,:], index=['OLS']) se = se.append(pa.DataFrame(res5.bse[None,:], index=['PGLSit5'])) clbse = sw.se_cov(sw.cov_cluster(mod.res_pooled, dgp.groups.astype(int))) se = se.append(pa.DataFrame(clbse[None,:], index=['OLSclu'])) pnwse = sw.se_cov(sw.cov_nw_panel(mod.res_pooled, 4, mod.group.groupidx)) se = se.append(pa.DataFrame(pnwse[None,:], index=['OLSpnw'])) print(se) #list(se.index) from statsmodels.iolib.table import SimpleTable headers = [str(i) for i in se.columns] stubs=list(se.index) # print SimpleTable(np.round(np.asarray(se), 4), # headers=headers, # stubs=stubs) print(SimpleTable(np.asarray(se), headers=headers, stubs=stubs, txt_fmt=dict(data_fmts=['%10.4f']), title='Standard Errors'))
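# A toy sketch (invented data, no statsmodels required) of the cluster-robust
# "sandwich" covariance that the assertions above compare against:
# bread = (X'X)^-1 and meat = sum over groups g of (X_g' u_g)(X_g' u_g)', with no
# small-sample correction applied (roughly the use_correction=False variants).
import numpy as np

rng_toy = np.random.RandomState(0)
n_groups_toy, nobs_i_toy = 10, 5
groups_toy = np.repeat(np.arange(n_groups_toy), nobs_i_toy)
X_toy = np.column_stack([np.ones(n_groups_toy * nobs_i_toy),
                         rng_toy.randn(n_groups_toy * nobs_i_toy, 2)])
y_toy = X_toy.dot([1.0, 2.0, -1.0]) + rng_toy.randn(n_groups_toy * nobs_i_toy)

beta_toy = np.linalg.lstsq(X_toy, y_toy, rcond=None)[0]
u_toy = y_toy - X_toy.dot(beta_toy)
bread_toy = np.linalg.inv(X_toy.T.dot(X_toy))
meat_toy = np.zeros((X_toy.shape[1], X_toy.shape[1]))
for g in range(n_groups_toy):
    s_g = X_toy[groups_toy == g].T.dot(u_toy[groups_toy == g])
    meat_toy += np.outer(s_g, s_g)
cov_cluster_toy = bread_toy.dot(meat_toy).dot(bread_toy)
print('toy cluster-robust bse:', np.sqrt(np.diag(cov_cluster_toy)))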
bsd-3-clause
viekie/tensorflow-tutorial
chap03/auto_encode_vision.py
1
3796
#!/usr/bin/env python import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot=False) learning_rate = 0.01 training_epochs = 10 batch_size = 256 display_step = 10 n_input = 784 X = tf.placeholder("float", [None, n_input]) n_hidden_1 = 128 n_hidden_2 = 64 n_hidden_3 = 10 n_hidden_4 = 2 weights = { 'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1])), 'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])), 'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])), 'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])), 'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3])), 'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2])), 'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1])), 'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input])) } biases = { 'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])), 'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])), 'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])), 'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])), 'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])), 'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])), 'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])), 'decoder_b4': tf.Variable(tf.random_normal([n_input])) } def encoder(x): layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1'])) layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2'])) layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3'])) layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']), biases['encoder_b4']) return layer_4 def decoder(x): layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1'])) layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2'])) layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3'])) layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']), biases['decoder_b4'])) return layer_4 encoder_op = encoder(X) decoder_op = decoder(encoder_op) y_pred = decoder_op y_true = X cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2)) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) with tf.Session() as sess: if int((tf.__version__).split('.')[1]) < 12 and \ int((tf.__version__).split('.')[0]) < 1: init = tf.initialize_all_variables() else: init = tf.global_variables_initializer() sess.run(init) total_batch = int(mnist.train.num_examples/batch_size) for epoch in range(training_epochs): for i in range(total_batch): batch_xs, batch_ys = mnist.train.next_batch(batch_size) _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs}) if epoch % display_step == 0: print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c)) print("Optimization Finished!") encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images}) print(encoder_result) plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels) plt.colorbar() plt.show()
apache-2.0
saiwing-yeung/scikit-learn
examples/decomposition/plot_kernel_pca.py
353
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
jldbc/pybaseball
pybaseball/statcast_pitcher_spin.py
1
5714
"""Statcast Pitcher Spin These calculations are based on the work by Prof. Alan Nathan of the University of Illinois. Article: http://baseball.physics.illinois.edu/trackman/SpinAxis.pdf Excel Workbook: http://baseball.physics.illinois.edu/trackman/MovementSpinEfficiencyTemplate-v2.xlsx """ from pybaseball import statcast_pitcher import pandas as pd import numpy as np K = .005383 # Environmental Constant DISTANCE_FROM_HOME_TO_MOUND = 60.5 DISTANCE_TO_PLATE_AT_VELOCITY_CAPTURE = 50 Y_VALUE_AT_FINAL_MEASUREMENT = 17/12 GRAVITATIONAL_ACCELERATION = 32.174 def statcast_pitcher_spin(start_dt=None, end_dt=None, player_id=None): pitcher_data = statcast_pitcher(start_dt, end_dt, player_id) spin_df = pitcher_data[[ 'release_extension', 'vx0', 'vy0', 'vz0', 'ax', 'ay', 'az', 'release_spin_rate']].copy() spin_df = find_intermediate_values(spin_df) pitcher_data[['Mx', 'Mz', 'phi', 'theta']] = spin_df[[ 'Mx', 'Mz', 'phi', 'theta']].copy() return pitcher_data # def get_statcast_pither_test_data(): # df = pd.read_csv("tests/statcast_pitching_test_data.csv") # return df def find_intermediate_values(spin_df): """Calls each intermediate function in sequence""" spin_df = find_release_point(spin_df) spin_df = find_release_time(spin_df) spin_df = find_release_velocity_components(spin_df) spin_df = find_flight_time(spin_df) spin_df = find_average_velocity_components(spin_df) spin_df = find_average_velocity(spin_df) spin_df = find_average_drag(spin_df) spin_df = find_magnus_acceleration_magnitude(spin_df) spin_df = find_average_magnus_acceleration(spin_df) spin_df = find_magnus_magnitude(spin_df) spin_df = find_phi(spin_df) spin_df = find_lift_coefficient(spin_df) spin_df = find_spin_factor(spin_df) spin_df = find_transverse_spin(spin_df) spin_df = find_spin_efficiency(spin_df) spin_df = find_theta(spin_df) return spin_df def find_release_point(df): df['yR'] = (DISTANCE_FROM_HOME_TO_MOUND - df['release_extension']) return df def find_release_time(df): df['tR'] = time_duration( df['yR'], df['vy0'], df['ay'], DISTANCE_TO_PLATE_AT_VELOCITY_CAPTURE, False) return df def find_release_velocity_components(df): df['vxR'] = (df['vx0'] + (df['ax'] * df['tR'])) df['vyR'] = (df['vy0'] + (df['ay'] * df['tR'])) df['vzR'] = (df['vz0'] + (df['az'] * df['tR'])) return df def find_flight_time(df): df['tf'] = time_duration( df['yR'], df['vyR'], df['ay'], Y_VALUE_AT_FINAL_MEASUREMENT, True) return df def find_average_velocity_components(df): df['vxbar'] = (2*df['vxR'] + df['ax']*df['tf'])/2 df['vybar'] = (2*df['vyR'] + df['ay']*df['tf'])/2 df['vzbar'] = (2*df['vzR'] + df['az']*df['tf'])/2 return df def find_average_velocity(df): df['vbar'] = three_comp_average(df['vxbar'], df['vybar'], df['vzbar']) return df def find_average_drag(df): df['adrag'] = (-(df['ax']*df['vxbar'] + df['ay']*df['vybar'] + (df['az'] + GRAVITATIONAL_ACCELERATION)*df['vzbar'])/ df['vbar']) return df def find_magnus_acceleration_magnitude(df): df['amagx'] = (df['ax'] + df['adrag']*df['vxbar']/df['vbar']) df['amagy'] = (df['ay'] + df['adrag']*df['vybar']/df['vbar']) df['amagz'] = (df['az'] + df['adrag']*df['vzbar']/df['vbar'] + GRAVITATIONAL_ACCELERATION) return df def find_average_magnus_acceleration(df): df['amag'] = three_comp_average(df['amagx'], df['amagy'], df['amagz']) return df def find_magnus_magnitude(df): df['Mx'] = (6 * df['amagx'] * (df['tf']**2)) df['Mz'] = (6 * df['amagz'] * (df['tf']**2)) return df def find_phi(df): df['phi'] = np.where( df['amagz'] > 0, np.arctan2(df['amagz'], df['amagx'])*180/np.pi, 360 + np.arctan2(df['amagz'], 
df['amagx'])*180/np.pi) + 90 df['phi'] = df['phi'].round(0).astype('int64') return df def find_lift_coefficient(df): df['Cl'] = (df['amag']/(K*df['vbar']**2)) return df def find_spin_factor(df): """Function to find spin factor Spin Factor formula was derived from a regression of experimental data. The formula below appears in the excel worksheet cited at the top of the file. No explanation is given for the constant values included. """ df['S'] = (0.166*np.log(0.336/(0.336-df['Cl']))) return df def find_transverse_spin(df): df['spinT'] = (78.92*df['S']*df['vbar']) return df def find_spin_efficiency(df): df['spin eff'] = df['spinT']/df['release_spin_rate'] return df def find_theta(df): df['theta'] = df['spin eff'].where( (df['spin eff'] >= -1.0) & (df['spin eff'] <= 1.0), np.nan) df['theta'] = df['theta'].where( df['theta'].isna(), np.arccos(df['theta']) * 180/np.pi).round(0) return df # HELPERS def time_duration(s, v, acc, adj, forward): """ Finds flight time given an original position, velocity, accelaration, and target position. Direction does not affect the time duration. It helps assign a positive or negative value to the flight time. s = (pd.Series) spacial point at known time v = (pd.Series) velocity at known time acc = (pd.Series) acceleration adj = (pd.Series) spatial difference between known and unknown points forward = (bool) indicating whether space_diff is in the positive or negative y direction """ return (-v - np.sqrt(v**2 - 2*acc*((1 if forward else -1) * (s-adj)))) / acc def three_comp_average(comp1, comp2, comp3): return np.sqrt(comp1**2 + comp2**2 + comp3**2)
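# A hypothetical usage sketch: the player id and the date range below are
# placeholders (not values taken from this module), and the call needs network
# access to Baseball Savant.  It simply shows where the derived spin-axis columns
# (Mx, Mz, phi, theta) end up in the returned frame.
if __name__ == '__main__':
    spin_df = statcast_pitcher_spin(start_dt='2019-06-01', end_dt='2019-06-30',
                                    player_id=543037)
    print(spin_df[['release_spin_rate', 'Mx', 'Mz', 'phi', 'theta']].head())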
mit
HaroldMills/Vesper
scripts/old_bird_detector_eval/evaluate_old_bird_detectors.py
1
7268
""" Plots precision vs. recall curves for the Old Bird detectors run on the BirdVox-full-night recordings. """ from collections import defaultdict import csv from matplotlib.backends.backend_pdf import PdfPages from matplotlib.ticker import MultipleLocator import matplotlib.pyplot as plt import numpy as np import pandas as pd import scripts.old_bird_detector_eval.utils as utils ''' Input columns: Call centers: Center Index, Center Frequency Old Bird Detections: Detector, Unit, Threshold, Start Index, Length Output columns: Detector, Unit, Threshold, Ground Truth Calls, Old Bird Calls, Old Bird Clips, Precision, Recall, F1 Detector, Unit, Ground Truth Calls, Old Bird Calls, Old Bird Clips, Precision, Recall, F1 ''' def main(): old_bird_clips = get_old_bird_clips() show_old_bird_clip_counts(old_bird_clips) ground_truth_call_centers = get_ground_truth_call_centers() show_ground_truth_call_counts(ground_truth_call_centers) rows = count_old_bird_calls(old_bird_clips, ground_truth_call_centers) raw_df = create_raw_df(rows) separated_df = create_separated_detectors_df(raw_df) merged_df = create_merged_detectors_df(separated_df) add_precision_recall_f1(raw_df) add_precision_recall_f1(separated_df) add_precision_recall_f1(merged_df) print(raw_df.to_csv()) print(separated_df.to_csv()) print(merged_df.to_csv()) plot_precision_vs_recall(separated_df, merged_df) def get_old_bird_clips(): clips = defaultdict(list) with open(utils.OLD_BIRD_CLIPS_FILE_PATH) as file_: reader = csv.reader(file_) # Skip header. next(reader) for row in reader: key = (row[0], int(row[1]), float(row[2])) value = (int(row[3]), int(row[4])) clips[key].append(value) return clips def show_old_bird_clip_counts(clips): print('Old Bird clip counts:') keys = sorted(clips.keys()) for key in keys: print(' ', key, len(clips[key])) def get_ground_truth_call_centers(): centers = defaultdict(list) csv_files_dir_path = utils.BIRDVOX_70K_UNIT_CLIPS_CSV_FILES_DIR_PATH for file_path in sorted(csv_files_dir_path.iterdir()): if not file_path.name.endswith('.csv'): continue station_num = int(file_path.name.split()[1]) with open(file_path) as file_: reader = csv.reader(file_) # Skip header. 
next(reader) for row in reader: index = int(row[0]) freq = int(row[1]) if freq != 0: # clip is call call_type = get_call_type(freq) key = (call_type, station_num) centers[key].append(index) return centers def get_call_type(freq): return 'Tseep' if freq >= utils.FREQ_THRESHOLD else 'Thrush' def show_ground_truth_call_counts(call_centers): print('Ground truth call counts:') keys = sorted(call_centers.keys()) for key in keys: print(' ', key, len(call_centers[key])) def count_old_bird_calls(old_bird_clips, ground_truth_call_center_indices): rows = [] for (detector_name, unit_num, threshold), clips in old_bird_clips.items(): call_center_indices = \ ground_truth_call_center_indices[(detector_name, unit_num)] window = utils.OLD_BIRD_CLIP_CALL_CENTER_WINDOWS[detector_name] matches = utils.match_clips_with_calls( clips, call_center_indices, window) old_bird_call_count = len(matches) old_bird_clip_count = len(clips) ground_truth_call_count = len(call_center_indices) rows.append([ detector_name, unit_num, threshold, ground_truth_call_count, old_bird_call_count, old_bird_clip_count]) return rows def create_raw_df(rows): columns = [ 'Detector', 'Unit', 'Threshold', 'Ground Truth Calls', 'Old Bird Calls', 'Old Bird Clips'] return pd.DataFrame(rows, columns=columns) def create_separated_detectors_df(df): df = df.drop(columns=['Unit']) grouped = df.groupby(['Detector', 'Threshold'], as_index=False) return grouped.aggregate(np.sum) def create_merged_detectors_df(df): df = df.drop(columns=['Detector']) grouped = df.groupby(['Threshold'], as_index=False) return grouped.aggregate(np.sum) def sum_counts(df, detector): if detector != 'All': df = df.loc[df['Detector'] == detector] return [ detector, df['Ground Truth Calls'].sum(), df['Old Bird Calls'].sum(), df['Old Bird Clips'].sum()] def add_precision_recall_f1(df): p = df['Old Bird Calls'] / df['Old Bird Clips'] r = df['Old Bird Calls'] / df['Ground Truth Calls'] df['Precision'] = to_percent(p) df['Recall'] = to_percent(r) df['F1'] = to_percent(2 * p * r / (p + r)) def to_percent(x): return round(1000 * x) / 10 def plot_precision_vs_recall(separated_df, merged_df): with PdfPages('/Users/harold/Desktop/plot.pdf') as pdf: _, axes = plt.subplots(figsize=(6, 6)) detector_data = { ('Tseep', 2, 'C0'), ('Thrush', 1.3, 'C1'), } # Plot separate detector curves. for detector_name, threshold, color in detector_data: # Plot curve. df = separated_df.loc[separated_df['Detector'] == detector_name] precisions = df['Precision'].values recalls = df['Recall'].values axes.plot(recalls, precisions, color=color, label=detector_name) # Put marker at Old Bird detector point. indices = dict( (t, i) for i, t in enumerate(df['Threshold'].values)) i = indices[threshold] axes.plot([recalls[i]], [precisions[i]], marker='o', color=color) # Plot merged curve. precisions = merged_df['Precision'].values recalls = merged_df['Recall'].values axes.plot(recalls, precisions, color='C2', label='Tseep and Thrush') plt.xlabel('Recall (%)') plt.ylabel('Precision (%)') limits = (0, 100) plt.xlim(limits) plt.ylim(limits) major_locator = MultipleLocator(25) minor_locator = MultipleLocator(5) axes.xaxis.set_major_locator(major_locator) axes.xaxis.set_minor_locator(minor_locator) axes.yaxis.set_major_locator(major_locator) axes.yaxis.set_minor_locator(minor_locator) plt.grid(which='both') plt.grid(which='minor', alpha=.4) axes.legend() plt.title('Old Bird Detector Precision vs. Recall') pdf.savefig() plt.show() if __name__ == '__main__': main()
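# A toy worked example (numbers invented) of the precision/recall/F1 arithmetic in
# add_precision_recall_f1 above: 80 matched calls out of 100 detected clips,
# against 120 ground-truth calls.
toy_calls, toy_clips, toy_ground_truth = 80, 100, 120
toy_p = toy_calls / toy_clips                 # precision = 0.800
toy_r = toy_calls / toy_ground_truth          # recall   ~= 0.667
toy_f1 = 2 * toy_p * toy_r / (toy_p + toy_r)  # F1       ~= 0.727
print(to_percent(toy_p), to_percent(toy_r), to_percent(toy_f1))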
mit
Barmaley-exe/scikit-learn
examples/cluster/plot_kmeans_stability_low_dim_dense.py
338
4324
""" ============================================================ Empirical evaluation of the impact of k-means initialization ============================================================ Evaluate the ability of k-means initializations strategies to make the algorithm convergence robust as measured by the relative standard deviation of the inertia of the clustering (i.e. the sum of distances to the nearest cluster center). The first plot shows the best inertia reached for each combination of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method (``init="random"`` or ``init="kmeans++"``) for increasing values of the ``n_init`` parameter that controls the number of initializations. The second plot demonstrate one single run of the ``MiniBatchKMeans`` estimator using a ``init="random"`` and ``n_init=1``. This run leads to a bad convergence (local optimum) with estimated centers stuck between ground truth clusters. The dataset used for evaluation is a 2D grid of isotropic Gaussian clusters widely spaced. """ print(__doc__) # Author: Olivier Grisel <[email protected]> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn.utils import shuffle from sklearn.utils import check_random_state from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import KMeans random_state = np.random.RandomState(0) # Number of run (with randomly generated dataset) for each strategy so as # to be able to compute an estimate of the standard deviation n_runs = 5 # k-means models can do several random inits so as to be able to trade # CPU time for convergence robustness n_init_range = np.array([1, 5, 10, 15, 20]) # Datasets generation parameters n_samples_per_center = 100 grid_size = 3 scale = 0.1 n_clusters = grid_size ** 2 def make_data(random_state, n_samples_per_center, grid_size, scale): random_state = check_random_state(random_state) centers = np.array([[i, j] for i in range(grid_size) for j in range(grid_size)]) n_clusters_true, n_features = centers.shape noise = random_state.normal( scale=scale, size=(n_samples_per_center, centers.shape[1])) X = np.concatenate([c + noise for c in centers]) y = np.concatenate([[i] * n_samples_per_center for i in range(n_clusters_true)]) return shuffle(X, y, random_state=random_state) # Part 1: Quantitative evaluation of various init methods fig = plt.figure() plots = [] legends = [] cases = [ (KMeans, 'k-means++', {}), (KMeans, 'random', {}), (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}), (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}), ] for factory, init, params in cases: print("Evaluation of %s with %s init" % (factory.__name__, init)) inertia = np.empty((len(n_init_range), n_runs)) for run_id in range(n_runs): X, y = make_data(run_id, n_samples_per_center, grid_size, scale) for i, n_init in enumerate(n_init_range): km = factory(n_clusters=n_clusters, init=init, random_state=run_id, n_init=n_init, **params).fit(X) inertia[i, run_id] = km.inertia_ p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1)) plots.append(p[0]) legends.append("%s with %s init" % (factory.__name__, init)) plt.xlabel('n_init') plt.ylabel('inertia') plt.legend(plots, legends) plt.title("Mean inertia for various k-means init across %d runs" % n_runs) # Part 2: Qualitative visual inspection of the convergence X, y = make_data(random_state, n_samples_per_center, grid_size, scale) km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1, 
random_state=random_state).fit(X) fig = plt.figure() for k in range(n_clusters): my_members = km.labels_ == k color = cm.spectral(float(k) / n_clusters, 1) plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color) cluster_center = km.cluster_centers_[k] plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=color, markeredgecolor='k', markersize=6) plt.title("Example cluster allocation with a single random init\n" "with MiniBatchKMeans") plt.show()
bsd-3-clause