repo_name | path | copies | size | content | license
---|---|---|---|---|---|
Cesar456/tensorflowTest | test0614/word2vec.py | 1 | 9076 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
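# Illustrative peek at the corpus format (not part of the original tutorial):
# read_data() returns one flat Python list of whitespace-separated tokens.
print('First 8 tokens (example):', words[:8])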
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, vocabulary_size):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words, vocabulary_size)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
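# Illustration only (assumes 'the' is inside the 50,000-word vocabulary, which holds
# for text8 since it is by far the most frequent token): known words map to small
# integer IDs, while out-of-vocabulary words fall back to index 0, i.e. 'UNK'.
print('Example lookup:', 'the', '->', dictionary.get('the', 0))
print('Example reverse lookup:', 0, '->', reverse_dictionary[0])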
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words at the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
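# Sanity check added for illustration (not in the original tutorial): with
# batch_size=8 and num_skips=2, each center word contributes 2 (input, context)
# pairs, giving 8 inputs and 8 single-column labels.
assert batch.shape == (8,) and labels.shape == (8, 1)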
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
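# Illustration only (not in the original tutorial): because build_dataset() assigns
# IDs in descending frequency order, these low-ID validation samples decode to very
# common words.
print('Validation words (example):', [reverse_dictionary[int(i)] for i in valid_examples[:3]])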
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nn.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.") | mit |
vincentadam87/gatsby-hackathon-seizure | code/python/seizures/pipelines/FeaturePredictorTest.py | 1 | 15912 | """
package containing classes for testing combinations of features and predictors
on specified datasets
@author: Wittawat
"""
from seizures.prediction.PredictorBase import PredictorBase
from seizures.features.FeatureExtractBase import FeatureExtractBase
from seizures.Global import Global
from seizures.data.DataLoader_v2 import DataLoader
from seizures.evaluation.XValidation import XValidation
from seizures.evaluation.performance_measures import accuracy, auc
from seizures.data.SubjectEEGData import SubjectEEGData
from abc import abstractmethod
from sklearn import cross_validation
from seizures.preprocessing import preprocessing
import numpy as np
from IPython.core.debugger import Tracer
class FeaturePredictorTestBase(object):
"""
Abstract class for all feature-predictor testers.
@author: Wittawat
"""
def __init__(self, feature_extractor, predictor, patient, data_path):
"""
feature_extractor: an instance of FeatureExtractBase
predictor: an instance of PredictorBase
patient: a string indicating a subject e.g., Dog_1
data_path: full path to directory containing Dog_1, .. Patient_1,..
"""
#assert(isinstance(feature_extractor, FeatureExtractBase))
#assert(isinstance(predictor, PredictorBase))
assert(isinstance(patient, basestring))
@abstractmethod
def test_combination(self, **options):
"""
Test the predictor using features given by feature_extractor
on the data specified by the patient argument.
"""
raise NotImplementedError()
class FeaturesPredictorsTestBase(object):
"""
Abstract class for all features-predictors testers.
The only difference to FeaturePredictorTestBase is that this class
accepts a list of feature_extractor's and a list of predictor's.
@author: Wittawat
"""
def __init__(self, feature_extractors, predictors, patient, data_path):
"""
feature_extractors: list of FeatureExtractBase's
predictors: list of PredictorBase's
patient: a string indicating a subject e.g., Dog_1
data_path: full path to directory containing Dog_1, .. Patient_1,..
"""
assert(type(feature_extractors)==type([]))
assert(type(predictors)==type([]))
assert(isinstance(patient, basestring))
@abstractmethod
def test_combination(self, **options):
"""
Test the predictors using features given by each feature_extractor
on the data specified by the patient argument.
"""
raise NotImplementedError()
class CVFeaturePredictorTester(FeaturePredictorTestBase):
"""
An implementation of FeaturePredictorTestBase which test
by cross validation the given predictor using features from
feature_extractor on the patient data.
@author: Wittawat
"""
def __init__(self, feature_extractor, predictor, patient,
data_path=Global.path_map('clips_folder')):
"""
feature_extractor: an instance of FeatureExtractBase
predictor: an instance of PredictorBase
patient: a string indicating a subject e.g., Dog_1
"""
assert(isinstance(feature_extractor, FeatureExtractBase))
assert(isinstance(predictor, PredictorBase))
assert(isinstance(patient, basestring))
self._feature_extractor = feature_extractor
self._predictor = predictor
self._patient = patient
self._data_path = data_path
def test_combination(self, fold=3, max_segments=-1):
"""
Test the predictor using features given by feature_extractor
on the data specified by the patient argument.
Based on examples/cross_validation_test.py
:param max_segments: maximum segments to load. -1 to use the number of
total segments available. Otherwise, all segments (ictal and interictal)
will be randomly subsampled without replacement.
return: a dictionary containing error report
"""
predictor = self._predictor
loader = DataLoader(self._data_path, self._feature_extractor)
X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(
self._patient, fold, max_segments)
# running cross validation
#print 'Testing %d-fold CV on data of %s'%(fold, self._patient)
#print "\ncross validation: seizures vs not"
result_seizure = XValidation.evaluate(X_list, y_seizure, predictor, evaluation=auc)
#print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
#% (np.mean(result_seizure), np.std(result_seizure), result_seizure)
#print "\ncross validation: early_vs_not"
result_early = XValidation.evaluate(X_list, y_early, predictor, evaluation=auc)
#print 'cross-validation results: mean = %.3f, sd = %.3f, raw scores = %s' \
#% (np.mean(result_early), np.std(result_early), result_early)
# dict containing bunch of reports
r = {}
r['predictor'] = predictor
r['feature_extractor'] = self._feature_extractor
# total features extracted. X_i is n x d
r['total_features'] = X_list[0].shape[1]
r['cv_fold'] = fold
r['seizure_mean_auc'] = np.mean(result_seizure)
r['seizure_std_auc'] = np.std(result_seizure)
r['early_mean_auc'] = np.mean(result_early)
r['early_std_auc'] = np.std(result_early)
return r
class CVFeaturesPredictorsTester(FeaturePredictorTestBase):
"""
An implementation of FeaturesPredictorsTestBase which test
by cross validation the given predictors using features from each
feature_extractor on the patient data.
@author: Wittawat
"""
def __init__(self, feature_extractors, predictors, patient,
data_path=Global.path_map('clips_folder')):
assert(type(feature_extractors)==type([]))
assert(type(predictors)==type([]))
assert(isinstance(patient, basestring))
self._feature_extractors = feature_extractors
self._predictors = predictors
self._patient = patient
self._data_path = data_path
@staticmethod
def test_all_combinations(features, feature_extractors, predictors):
"""
features is a list [(X_seizure, y_seizure, X_early, y_early)] where each element
in the tuple is itself a list of length = fold containing data in each
CV fold
return an instance of FeaturesPredictsTable
"""
# these loops can be parallelized.
# !! Can be improved !!
L = []
for i, feature_extractor in enumerate(feature_extractors):
feature_list = []
X_seizure, y_seizure, X_early, y_early = features[i]
for j, predictor in enumerate(predictors):
print 'Evaluating feat: %s + pred: %s on seizure task'%(str(feature_extractor), str(predictor) )
result_seizure = XValidation.evaluate(X_seizure, y_seizure, predictor, evaluation=auc)
print 'Evaluating feat: %s + pred: %s on early seizure task'%(str(feature_extractor), str(predictor) )
result_early = XValidation.evaluate(X_early, y_early, predictor, evaluation=auc)
r = {}
r['predictor'] = predictor
r['feature_extractor'] = feature_extractor
# total features extracted. X_i is n x d
r['total_features'] = X_early[0].shape[1]
r['cv_fold'] = len(X_early)
r['seizure_mean_auc'] = np.mean(result_seizure)
r['seizure_std_auc'] = np.std(result_seizure)
r['early_mean_auc'] = np.mean(result_early)
r['early_std_auc'] = np.std(result_early)
feature_list.append(r)
L.append(feature_list)
return FeaturesPredictsTable(L)
def test_combination(self, fold=3, max_segments=-1):
"""
Test the predictors using features given by each feature_extractor
in feature_extractors on the data specified by the patient argument.
:param max_segments: maximum segments to load. -1 to use the number of
total segments available. Otherwise, all segments (ictal and interictal)
will be randomly subsampled without replacement.
return: an instance of FeaturesPredictsTable """
# preload data and extract features
features = [] # list of feature tuples. list length = len(self._feature_extractors)
for i, feature_extractor in enumerate(self._feature_extractors):
loader = DataLoader(self._data_path, feature_extractor)
X_list,y_seizure, y_early = loader.blocks_for_Xvalidation(
self._patient, fold, max_segments)
features.append( (X_list, y_seizure, X_list, y_early) )
T = CVFeaturesPredictorsTester.test_all_combinations(features,
self._feature_extractors, self._predictors)
return T
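# Illustration only (synthetic shapes, not real EEG features): the 'features' argument
# expected by CVFeaturesPredictorsTester.test_all_combinations() is one tuple per
# feature extractor, (X_seizure, y_seizure, X_early, y_early), where each entry is a
# list of length fold holding one data block (or label vector) per CV fold.
_demo_fold = 3
_demo_features = [(
[np.zeros((10, 4)) for _ in range(_demo_fold)],  # X_seizure: one n x d block per fold
[np.zeros(10) for _ in range(_demo_fold)],       # y_seizure: labels per fold
[np.zeros((10, 4)) for _ in range(_demo_fold)],  # X_early
[np.zeros(10) for _ in range(_demo_fold)],       # y_early
)]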
class FeaturesPredictsTable(object):
"""
Simple class to manipulate the data table returned by
CVFeaturesPredictorsTester.
To export the results in other ways,
just add your own methods here. See print_table() as an example.
@author Wittawat
"""
def __init__(self, features_predictors_results):
"""
features_predictors_results: a two-dimensional list L of dictionaries
containing error report such that L[i] is a list of reports for
feature_extractor i on all predictors.
"""
self.raw_table = features_predictors_results
def print_table(self, attribute):
"""
Report result in a feature_extractors x predictors table.
attribute specifies what entries to report in the table.
attribute is a string with possible values given in
CVFeaturePredictorTester.test_combination() (the key in the returned
dictionary)
"""
print "# From " + type(self).__name__.split('.')[-1]
print "Reporting %s" % attribute
L = self.raw_table
from prettytable import PrettyTable
predictors_strs = [str(rep['predictor']) for rep in L[0]]
extractor_strs = [str(l[0]['feature_extractor']) for l in L]
# see https://code.google.com/p/prettytable/wiki/Tutorial
T = PrettyTable(['feat. \ pred.'] + predictors_strs)
T.padding_width = 1 # One space between column edges and contents (default)
for i, feature_extractor in enumerate(extractor_strs):
predictors_values = [r[attribute] for r in L[i]]
if isinstance(predictors_values[0], float):
predictors_values = ['%.3g'%v for v in predictors_values]
extractor_col = extractor_strs[i]
T.add_row([extractor_col] + predictors_values )
print T
def to_csv(self, file_name):
raise NotImplementedError('Can someone implement this ? ')
def __str__(self):
self.print_table('seizure_mean_auc')
return ''
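# A minimal sketch of the CSV export that to_csv() above leaves unimplemented
# (assumption: every report dict in raw_table carries the attribute being exported,
# as produced by CVFeaturePredictorTester.test_combination()). Written as a standalone
# helper rather than part of the original module; it can be adapted into the method.
def export_table_to_csv(table, attribute, file_name):
    import csv
    L = table.raw_table
    predictor_names = [str(rep['predictor']) for rep in L[0]]
    with open(file_name, 'wb') as f:  # 'wb' because this module targets Python 2
        writer = csv.writer(f)
        writer.writerow(['feature_extractor'] + predictor_names)
        for row in L:
            writer.writerow([str(row[0]['feature_extractor'])] +
                            [rep[attribute] for rep in row])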
class CachedCVFeaPredTester(FeaturePredictorTestBase):
"""
An implementation of FeaturesPredictorsTestBase which test
by cross validation the given predictors using features from each
feature_extractor on the patient data. Cache loaded raw data so that
loading is done only once.
@author: Wittawat
"""
def __init__(self, feature_extractors, predictors, patient,
data_path=Global.path_map('clips_folder'),params=None):
assert(type(feature_extractors)==type([]))
assert(type(predictors)==type([]))
assert(isinstance(patient, basestring))
self._feature_extractors = feature_extractors
self._predictors = predictors
self._patient = patient
self._data_path = data_path
self.params = params
def test_combination(self, fold=3, max_segments=-1):
"""
Test the predictors using features given by each feature_extractor
in feature_extractors on the data specified by the patient argument.
:param max_segments: maximum segments to load. -1 to use the number of
total segments available. Otherwise, all segments (ictal and interictal)
will be randomly subsampled without replacement.
return: an instance of FeaturesPredictsTable """
loader = SubjectEEGData(self._patient, self._data_path, use_cache=True,
max_train_segments=max_segments)
# a list of (Instance, y_seizure, y_early)'s
train_data = loader.get_train_data()
fs = train_data[0][0].sample_rate
# preprocessing.
#params = {'fs':fs,
# 'anti_alias_cutoff': 100.,
## 'anti_alias_width': 30.,
# 'anti_alias_attenuation' : 40,
# 'elec_noise_width' :3.,
# 'elec_noise_attenuation' : 60.0,
# 'elec_noise_cutoff' : [49.,51.]}
# list of preprocessed tuples
params = self.params
params['fs']=fs
# for (x, y_seizure, y_early) in train_data:
# x.eeg_data = preprocessing.preprocess_multichannel_data(x.eeg_data, self.params)
#train_data2 = []
#for (x, y_seizure, y_early) in train_data:
# x.eeg_data = preprocessing.preprocess_multichannel_data(x.eeg_data, params)
# train_data2.append(((x, y_seizure, y_early)))
#train_data =train_data2
# pre-extract features
features = [] # list of feature tuples. list length = len(self._feature_extractors)
Y_seizure = np.array([y_seizure for (x, y_seizure, y_early) in train_data])
Y_early = np.array([y_early for (x, y_seizure, y_early) in train_data])
skf_seizure = cross_validation.StratifiedKFold(Y_seizure, n_folds=fold)
skf_early = cross_validation.StratifiedKFold(Y_early, n_folds=fold)
for i, feature_extractor in enumerate(self._feature_extractors):
#print 'Extracting features with %s'%str(feature_extractor)
#Xlist = [feature_extractor.extract(x) for (x, y_seizure, y_early)
# in train_data]
Xlist = []
for (x, y_seizure, y_early) in train_data:
#print '---------'
#print x.eeg_data.shape
params['fs']=x.sample_rate
x.eeg_data = preprocessing.preprocess_multichannel_data(x.eeg_data, params)
feat =feature_extractor.extract(x)
#print x.eeg_data.shape
#print feat.shape
Xlist.append(feat)
#Tracer()()
# Xlist = list of ndarray's
#print len(Xlist), Xlist[0].shape,len(Xlist[0])
n = len(Xlist)
#d = len(Xlist[0])
d = Xlist[0].shape[0]
# make 2d numpy array
#print n,d
X = np.zeros((n, d))
#print X.shape
for i in xrange(len(Xlist)):
#print Xlist[i].shape, X[i, :].shape
X[i, :] = Xlist[i].T
# chunk data for cross validation
# construct a list of 2d numpy arrays to be fed to XValidation
# tr_I = train index, te_I = test index
X_seizure = []
y_seizure = []
#Tracer()()
for tr_I, te_I in skf_seizure:
X_seizure.append(X[tr_I, :])
y_seizure.append(Y_seizure[tr_I])
X_early = []
y_early = []
for tr_I, te_I in skf_early:
X_early.append(X[tr_I, :])
y_early.append(Y_early[tr_I])
features.append( (X_seizure, y_seizure, X_early, y_early) )
T = CVFeaturesPredictorsTester.test_all_combinations(features,
self._feature_extractors, self._predictors)
return T
| bsd-2-clause |
jinghaomiao/apollo | modules/tools/prediction/data_pipelines/junctionMLP_train.py | 3 | 4328 | #!/usr/bin/env python3
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
tensorflow 1.11
"""
import os
import h5py
import logging
import argparse
import numpy as np
import tensorflow as tf
from modules.tools.prediction.data_pipelines.proto import fnn_model_pb2
from fnn_model_pb2 import FnnModel, Layer
from sklearn.model_selection import train_test_split
dim_input = 7 + 72
dim_output = 12
def load_data(filename):
"""
Load the data from h5 file to the format of numpy
"""
if not (os.path.exists(filename)):
logging.error("file: {}, does not exist".format(filename))
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
logging.error("file: {} is not an hdf5 file".format(filename))
os._exit(1)
samples = dict()
h5_file = h5py.File(filename, 'r')
for key in h5_file.keys():
samples[key] = h5_file[key][:]
print("load file success")
return samples['data']
def data_preprocessing(data):
X = data[:, :dim_input]
Y = data[:, -dim_output:]
return X, Y
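# Illustration only (synthetic row, added for documentation): with dim_input = 79 and
# dim_output = 12 each sample row has 91 columns; data_preprocessing() keeps the first
# 79 as network inputs and the last 12 as the target used below.
_demo_row = np.arange(dim_input + dim_output).reshape(1, -1)
_demo_X, _demo_Y = data_preprocessing(_demo_row)
assert _demo_X.shape == (1, dim_input) and _demo_Y.shape == (1, dim_output)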
def save_model(model, filename):
"""
Save the trained model parameters into protobuf binary format file
"""
net_params = FnnModel()
net_params.num_layer = 0
for layer in model.layers:
net_params.num_layer += 1
net_layer = net_params.layer.add()
config = layer.get_config()
net_layer.layer_input_dim = dim_input
net_layer.layer_output_dim = dim_output
if config['activation'] == 'relu':
net_layer.layer_activation_func = fnn_model_pb2.Layer.RELU
elif config['activation'] == 'tanh':
net_layer.layer_activation_func = fnn_model_pb2.Layer.TANH
elif config['activation'] == 'sigmoid':
net_layer.layer_activation_func = fnn_model_pb2.Layer.SIGMOID
elif config['activation'] == 'softmax':
net_layer.layer_activation_func = fnn_model_pb2.Layer.SOFTMAX
weights, bias = layer.get_weights()
net_layer.layer_bias.columns.extend(bias.reshape(-1).tolist())
for col in weights.tolist():
row = net_layer.layer_input_weight.rows.add()
row.columns.extend(col)
net_params.dim_input = dim_input
net_params.dim_output = dim_output
with open(filename, 'wb') as params_file:
params_file.write(net_params.SerializeToString())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train neural network based on feature files and save parameters')
parser.add_argument('filename', type=str, help='h5 file of data.')
args = parser.parse_args()
file = args.filename
# load_data
data = load_data(file)
print("Data load success, with data shape: " + str(data.shape))
train_data, test_data = train_test_split(data, test_size=0.2)
X_train, Y_train = data_preprocessing(train_data)
X_test, Y_test = data_preprocessing(test_data)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(30, activation=tf.nn.relu),
tf.keras.layers.Dense(20, activation=tf.nn.relu),
tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
# loss='MSE',
metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=5)
model_path = os.path.join(os.getcwd(), "junction_mlp_vehicle_model.bin")
save_model(model, model_path)
print("Model saved to: " + model_path)
score = model.evaluate(X_test, Y_test)
print("Testing accuracy is: " + str(score))
| apache-2.0 |
kpespinosa/BuildingMachineLearningSystemsWithPython | ch07/lr10k.py | 24 | 1228 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
# Whether to use Elastic nets (otherwise, ordinary linear regression is used)
# Load data:
data, target = load_svmlight_file('data/E2006.train')
lr = LinearRegression()
# Compute error on training data to demonstrate that we can obtain near perfect
# scores:
lr.fit(data, target)
pred = lr.predict(data)
print('RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on training, {:.2}'.format(r2_score(target, pred)))
print('')
pred = np.zeros_like(target)
kf = KFold(len(target), n_folds=5)
for train, test in kf:
lr.fit(data[train], target[train])
pred[test] = lr.predict(data[test])
print('RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
| mit |
cpcloud/blaze | blaze/compute/tests/test_comprehensive.py | 2 | 4573 | from __future__ import absolute_import, division, print_function
import numpy as np
from pandas import DataFrame
from odo import resource, into
from datashape.predicates import isscalar, iscollection, isrecord
from blaze.expr import symbol, by
from blaze.interactive import Data
from blaze.compute import compute
from blaze.expr.functions import sin, exp
sources = []
t = symbol('t', 'var * {amount: int64, id: int64, name: string}')
L = [[ 100, 1, 'Alice'],
[ 200, 2, 'Bob'],
[ 300, 3, 'Charlie'],
[-400, 4, 'Dan'],
[ 500, 5, 'Edith']]
df = DataFrame(L, columns=['amount', 'id', 'name'])
x = into(np.ndarray, df)
sources = [df, x]
try:
import sqlalchemy
sql = resource('sqlite:///:memory:::accounts', dshape=t.dshape)
into(sql, L)
sources.append(sql)
except:
sql = None
try:
import bcolz
bc = into(bcolz.ctable, df)
sources.append(bc)
except ImportError:
bc = None
try:
import pymongo
except ImportError:
pymongo = mongo = None
if pymongo:
try:
db = pymongo.MongoClient().db
try:
coll = db._test_comprehensive
except AttributeError:
coll = db['_test_comprehensive']
coll.drop()
mongo = into(coll, df)
sources.append(mongo)
except pymongo.errors.ConnectionFailure:
mongo = None
# {expr: [list-of-exclusions]}
expressions = {
t: [],
t['id']: [],
abs(t['amount']): [],
t.id.max(): [],
t.amount.sum(): [],
t.amount.sum(keepdims=True): [],
t.amount.count(keepdims=True): [],
t.amount.nunique(keepdims=True): [mongo],
t.amount.nunique(): [],
t.amount.head(): [],
t.amount + 1: [mongo],
sin(t.amount): [sql, mongo], # sqlite doesn't support trig
exp(t.amount): [sql, mongo],
t.amount > 50: [mongo],
t[t.amount > 50]: [],
t[t.name.like('Alic*')]: [],
t.sort('name'): [bc],
t.sort('name', ascending=False): [bc],
t.head(3): [],
t.name.distinct(): [],
t[t.amount > 50]['name']: [], # odd ordering issue
t.id.map(lambda x: x + 1, schema='int64', name='id'): [sql, mongo],
t[t.amount > 50]['name']: [],
by(t.name, total=t.amount.sum()): [],
by(t.id, count=t.id.count()): [],
by(t[['id', 'amount']], count=t.id.count()): [],
by(t[['id', 'amount']], total=(t.amount + 1).sum()): [mongo],
by(t[['id', 'amount']], n=t.name.nunique()): [mongo, bc],
by(t.id, count=t.amount.count()): [],
by(t.id, n=t.id.nunique()): [mongo, bc],
# by(t, count=t.count()): [],
# by(t.id, count=t.count()): [],
t[['amount', 'id']]: [x], # https://github.com/numpy/numpy/issues/3256
t[['id', 'amount']]: [x, bc], # bcolz sorting
t[0]: [sql, mongo, bc],
t[::2]: [sql, mongo, bc],
t.id.utcfromtimestamp: [sql],
t.distinct().nrows: [],
t.nelements(axis=0): [],
t.nelements(axis=None): [],
t.amount.truncate(200): [sql]
}
base = df
def df_eq(a, b):
return (list(a.columns) == list(b.columns)
# and list(a.dtypes) == list(b.dtypes)
and into(set, into(list, a)) == into(set, into(list, b)))
def typename(obj):
return type(obj).__name__
def test_base():
for expr, exclusions in expressions.items():
if iscollection(expr.dshape):
model = into(DataFrame, into(np.ndarray, expr._subs({t: Data(base, t.dshape)})))
else:
model = compute(expr._subs({t: Data(base, t.dshape)}))
print('\nexpr: %s\n' % expr)
for source in sources:
if id(source) in map(id, exclusions):
continue
print('%s <- %s' % (typename(model), typename(source)))
T = Data(source)
if iscollection(expr.dshape):
result = into(type(model), expr._subs({t: T}))
if isscalar(expr.dshape.measure):
assert set(into(list, result)) == set(into(list, model))
else:
assert df_eq(result, model)
elif isrecord(expr.dshape):
result = compute(expr._subs({t: T}))
assert into(tuple, result) == into(tuple, model)
else:
result = compute(expr._subs({t: T}))
try:
result = result.scalar()
except AttributeError:
pass
assert result == model
| bsd-3-clause |
dtamayo/rebound | python_examples/megno/problem.py | 1 | 3478 | #!/usr/bin/python
# This example integrates Jupiter and Saturn in the Solar system for a variety of initial conditions.
# Alongside the normal equations of motions, IAS15 is used to integrate the variational equations.
# These can be used to measure the Mean Exponential Growth of Nearby Orbits (MEGNO), a chaos indicator.
# This example script runs N^2 simulations (N is set below) and plots the MEGNO value. Values close to <Y>=2 correspond
# to regular quasi-periodic orbits. Higher values of <Y> correspond to chaotic orbits.
# Import matplotlib
import matplotlib; matplotlib.use("pdf")
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# Import the rebound module
import rebound
# Import other modules
import numpy as np
import multiprocessing
import warnings
# Runs one simulation.
def simulation(par):
saturn_a, saturn_e = par
sim = rebound.Simulation()
sim.integrator = "whfast"
sim.min_dt = 5.
sim.dt = 1.
# These parameters are only approximately those of Jupiter and Saturn.
sun = rebound.Particle(m=1.)
sim.add(sun)
jupiter = sim.add(primary=sun,m=0.000954, a=5.204, M=0.600, omega=0.257, e=0.048)
saturn = sim.add(primary=sun,m=0.000285, a=saturn_a, M=0.871, omega=1.616, e=saturn_e)
sim.move_to_com()
sim.init_megno()
# Hide warning messages (WHFast timestep too large)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sim.integrate(1e3*2.*np.pi)
return [sim.calculate_megno(),1./(sim.calculate_lyapunov()*2.*np.pi)] # returns MEGNO and Lypunov timescale in years
### Setup grid and run many simulations in parallel
N = 100 # Grid size, increase this number to see more detail
a = np.linspace(7.,10.,N) # range of saturn semi-major axis in AU
e = np.linspace(0.,0.5,N) # range of saturn eccentricity
parameters = []
for _e in e:
for _a in a:
parameters.append([_a,_e])
simulation((8,0.))
# Run simulations in parallel
pool = rebound.InterruptiblePool() # Number of threads default to the number of CPUs on the system
print("Running %d simulations on %d threads..." % (len(parameters), pool._processes))
res = np.nan_to_num(np.array(pool.map(simulation,parameters)))
megno = np.clip(res[:,0].reshape((N,N)),1.8,4.) # clip arrays to plot saturated
lyaptimescale = np.clip(np.absolute(res[:,1].reshape((N,N))),1e1,1e5)
### Create plot and save as pdf
# Setup plots
f, axarr = plt.subplots(2,figsize=(10,10))
extent = [a.min(), a.max(), e.min(), e.max()]
for ax in axarr:
ax.set_xlim(extent[0],extent[1])
ax.set_ylim(extent[2],extent[3])
ax.set_xlabel("$a_{\mathrm{Saturn}}$ [AU]")
ax.set_ylabel("$e_{\mathrm{Saturn}}$")
# Plot MEGNO
im1 = axarr[0].imshow(megno, vmin=1.8, vmax=4., aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn_r", extent=extent)
cb1 = plt.colorbar(im1, ax=axarr[0])
cb1.solids.set_rasterized(True)
cb1.set_label("MEGNO $\\langle Y \\rangle$")
# Plot Lyapunov timescale
im2 = axarr[1].imshow(lyaptimescale, vmin=1e1, vmax=1e5, norm=LogNorm(), aspect='auto', origin="lower", interpolation='nearest', cmap="RdYlGn", extent=extent)
cb2 = plt.colorbar(im2, ax=axarr[1])
cb2.solids.set_rasterized(True)
cb2.set_label("Lyapunov timescale [years]")
plt.savefig("megno.pdf")
### Automatically open plot (OSX only)
from sys import platform as _platform
if _platform == "darwin":
import os
os.system("open megno.pdf")
| gpl-3.0 |
jayflo/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
fernandopso/twitter-svm-tfidf.py | app/miner/models.py | 2 | 1069 | #!/usr/bin/env python2.7
#-*- coding: utf-8 -*-
from sklearn import svm
class Models(object):
def __init__(self, trained, evaluations, classify):
self.trained = trained
self.evaluations = evaluations
self.classify = classify
self.result = list()
def svm_linear(self):
"""
Classify tweets with a linear Support Vector Machine
"""
classification = svm.SVC(kernel='linear')
classification.fit(self.trained, self.evaluations)
prediction = classification.predict(self.classify)
for p in prediction:
self.result.append(p)
print "\n##############################################################"
print "The classification result of %d tweets is:\n" % len(self.result)
print "Positive: %d tweets" % self.result.count(1)
print "Negative: %d tweets" % self.result.count(2)
print "Neutral: %d tweets" % self.result.count(3)
print "Unknown: %d tweets" % self.result.count(4)
return prediction
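# Hypothetical usage sketch (toy numeric vectors, not real tweet TF-IDF features);
# it only documents the calling convention of the class above.
if __name__ == '__main__':
    demo_trained = [[0.0, 1.0], [1.0, 0.0], [0.9, 0.1], [0.1, 0.9]]
    demo_evaluations = [1, 2, 2, 1]  # e.g. 1 = positive, 2 = negative
    demo_classify = [[0.8, 0.2], [0.05, 0.95]]
    Models(demo_trained, demo_evaluations, demo_classify).svm_linear()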
| mit |
adamamiller/NUREU17 | LSST/SuperNovaLightCurves/process_SN.py | 3 | 2912 | #import necessary python libraries
import json
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit, minimize
import pandas as pd
import math
from JSON_to_DF import JSON_to_DataFrame
from Lightcurve_class import *
import celerite
import pickle
#Create Kernels for Gaussian Process
#Real term parameter initialization
a = 1e-5
c = 1
#Matern term parameter initialization
sig = 1e-5
rho = 100
#Bounds on parameters
bounds = dict(log_a = (-15,15), log_c = (-15,15))
bounds = dict(log_sigma = (-15, 15), log_rho = (-15, 15))
#Create Kernels
Real_Kernel = celerite.terms.RealTerm(log_a = np.log(a), log_c = np.log(c), bounds=bounds)
Matern_Kernel = celerite.terms.Matern32Term(log_sigma = np.log(sig), log_rho = np.log(rho))
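# Illustration only (toy observation times, not real light-curve data): a kernel like
# the ones above is normally wrapped in a celerite.GP and conditioned on the epochs and
# measurement errors with compute(); the Lightcurve_class code is assumed to do the
# equivalent internally before fitting.
_demo_t = np.linspace(0.0, 10.0, 25)
_demo_gp = celerite.GP(Matern_Kernel)
_demo_gp.compute(_demo_t, yerr=0.1)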
#Create lists to store Rchi2 and median values for given fit
Poly4_median = []
Poly4_Rchi2 = []
Poly6_median = []
Poly6_Rchi2 = []
Poly8_median = []
Poly8_Rchi2 = []
GP_Real_median = []
GP_Real_Rchi2 = []
GP_Matern_median = []
GP_Matern_Rchi2 = []
Kapernka_median = []
Kapernka_Rchi2 = []
Bazin_median = []
Bazin_Rchi2 = []
#Loop through pickle files gathering Rchi2 and median data for each fit
directory = "../../../OSC_data/pickled_data/"
for roots, dirs, files in os.walk(directory):
for file in files:
SN = deserialize(directory + file)
for key in SN.Lightcurves.keys():
if(SN.Lightcurves[key].n_good_obs < 3):
continue
print(SN.name, key)
SN.Lightcurves[key].polynomial_fit_plot(4, plot=False)
SN.Lightcurves[key].polynomial_fit_plot(6, plot=False)
SN.Lightcurves[key].polynomial_fit_plot(8, plot=False)
SN.Lightcurves[key].Kapernka_fit_plot(plot=False)
SN.Lightcurves[key].Bazin_fit_plot(plot=False)
SN.Lightcurves[key].Gaussian_process(Real_Kernel, plot=False)
SN.Lightcurves[key].Gaussian_process(Matern_Kernel, plot=False)
print("Models fitted")
for fit, value in SN.Lightcurves[key].Rchi2.items():
if(fit == 'poly_4'):
Poly4_Rchi2.append(value)
elif(fit == 'poly_6'):
Poly6_Rchi2.append(value)
elif(fit == 'poly_8'):
Poly8_Rchi2.append(value)
elif(fit == 'GP_1'):
GP_Real_Rchi2.append(value)
elif(fit == 'GP_2'):
GP_Matern_Rchi2.append(value)
elif(fit == 'Kapernka'):
Kapernka_Rchi2.append(value)
elif(fit == 'Bazin'):
Bazin_Rchi2.append(value)
print("Rchi2 loaded")
for fit, value in SN.Lightcurves[key].medians.items():
if(fit == 'poly_4'):
Poly4_median.append(value)
elif(fit == 'poly_6'):
Poly6_median.append(value)
elif(fit == 'poly_8'):
Poly8_median.append(value)
#elif(key == 'GP'):
#GP_Real_median.append(value)
elif(fit == 'GP'):
GP_Matern_median.append(value)
elif(fit == 'Kapernka'):
Kapernka_median.append(value)
elif(fit == 'Bazin'):
Bazin_median.append(value)
print("medians loaded")
print(len(Poly6_median))
print(len(Poly6_Rchi2))
| mit |
sarvex/PythonMachineLearning | Chapter 1/plot_forest.py | 40 | 1279 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def plot_forest(max_depth=1):
plt.figure()
ax = plt.gca()
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
forest = RandomForestClassifier(n_estimators=20, max_depth=max_depth,
random_state=1).fit(X, y)
Z = forest.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=.4)
ax.set_title("max_depth = %d" % max_depth)
else:
ax.set_title("data set")
ax.scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
def plot_forest_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_forest, max_depth=slider)
| isc |
aewhatley/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
IBYoung/oceanbase | oceanbase_0.4/tools/deploy/perf/perf_base.py | 12 | 18473 | # -*- coding: utf-8 -*-
import time
import datetime
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
def perf_client_attrs():
prepare_data = '''sh: ssh $usr@$ip "${client_env_vars} client_idx=$idx $dir/$client/stress.sh prepare ${type} ${client_start_args}"'''
return locals()
def perf_ct_attrs():
prepare_data = 'all: client prepare_data type=all'
reboot_to_prepare_data = 'seq: stop conf rsync clear prepare_data'
return locals()
def perf_role_attrs():
save_profile = '''sh: scp $usr@$ip:$dir/log/$role.profile $_rest_'''
save_gprofile = '''sh: scp $usr@$ip:$gprofiler_output $_rest_'''
start_periodic_pstack = '''sh: ssh $usr@$ip "sh $dir/tools/periodic_pstack.sh $__self__ \`pgrep -f ^$exe\` $dir/log/pstacks/ $ptime > /dev/null 2>&1 < /dev/zero &"'''
save_periodic_pstack = '''sh: scp -r $usr@$ip:$dir/log/pstacks/$__self__ $_rest_'''
return locals()
def perf_obi_attrs():
perf_load_params = r'''call: obmysql extra="-e \"alter system set merge_delay_interval='1s' server_type=chunkserver;\""'''
perf_create_tables = r'''call: obmysql extra="< perf/${case}.create"'''
perf_init = 'seq: perf_load_params perf_create_tables sleep[sleep_time=2]'
perf = 'seq: reboot sleep[sleep_time=10] perf_init perf_prepare.reboot_to_prepare_data turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
perf_ups = 'seq: reboot sleep[sleep_time=10] perf_init turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
running_ts = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_tmp_dir = "tmp.%s" % (running_ts)
def get_cluster_ips(*args, **ob):
def get_server_ips(server_role):
server_list = get_match_child(ob, server_role)
return [find_attr(find_attr(ob, k), "ip") for k in server_list.keys()]
cluster_ips = get_server_ips("mergeserver") + get_server_ips("chunkserver") + get_server_ips("updateserver") + get_server_ips("rootserver")
seen = set()
seen_add = seen.add
return [ x for x in cluster_ips if x not in seen and not seen_add(x)]
def turn_on_perf(*args, **ob):
# turn on profile log and gprofiler
all_do(ob, 'mergeserver', 'kill', '-50')
all_do(ob, 'chunkserver', 'kill', '-50')
all_do(ob, 'mergeserver', 'kill', '-60')
all_do(ob, 'chunkserver', 'kill', '-60')
all_do(ob, 'updateserver', 'kill', '-60')
#call_(ob, 'ms0.kill', '-50')
#call_(ob, 'ms0.kill', '-60')
#call_(ob, 'cs0.kill', '-60')
#call_(ob, 'ups0.kill', '-60')
for ip in get_cluster_ips(*args, **ob):
ssh(ip, sub2("$dir/tools/linuxmon_x64.bin time=${ptime}s wait=1 back=yes > $dir/server_stats 2>&1 < /dev/zero &", ob))
#call_(ob, 'ms0.start_periodic_pstack')
#call_(ob, 'cs0.start_periodic_pstack')
#call_(ob, 'ups0.start_periodic_pstack')
def turn_off_perf(*args, **ob):
# turn off profile log and gprofiler
all_do(ob, 'mergeserver', 'kill', '-51')
all_do(ob, 'chunkserver', 'kill', '-51')
all_do(ob, 'mergeserver', 'kill', '-61')
all_do(ob, 'chunkserver', 'kill', '-61')
all_do(ob, 'updateserver', 'kill', '-61')
#call_(ob, 'ms0.kill', '-51')
#call_(ob, 'ms0.kill', '-61')
#call_(ob, 'cs0.kill', '-61')
#call_(ob, 'ups0.kill', '-61')
def pprof_gprofile_output(server_bin, profile_output, perf_result_dir, *args, **ob):
st = time.time()
cost_graph_name = "${role}_${ip}_cost_graph.pdf"
cost_graph_path = "%s/%s" % (perf_result_dir, cost_graph_name)
top50_cmd = sub2("pprof --text $server_bin $profile_output", locals())
graph_cmd = sub2("pprof --pdf $server_bin --edgefraction=0 $profile_output > $cost_graph_path", locals())
pro_res = popen(sub2(top50_cmd, ob)).splitlines()
i = 0
while i < len(pro_res):
if pro_res[i].startswith('Total: '):
i += 1
break
i += 1
func_top50 = '\n'.join(pro_res[i:i + 50])
sh(sub2(graph_cmd, ob))
info('drawing %s profile graph costs %ss' % (server_bin, time.time() - st))
output = """
<p>$role $ip 函数消耗排名前50:
<pre>%s</pre></p>
<p><a href="%s">函数消耗线框图</a></p>
""" % (func_top50, cost_graph_name)
return sub2(output, ob)
def parse_profile_log(profile_log, perf_result_dir, *args, **ob):
time_format = "%Y-%m-%d %H:%M:%S"
query_ratio = int(sub2("$ptime", ob), 10)
d = dict()
start_time = None
end_time = None
sql_count = 0
real_sql_count = 0
sql_time = 0
sql_time_dist = dict()
wait_time = 0
ud = dict(sql_count = 0, real_sql_count = 0, sql_time = 0, wait_time = 0)
qps2time = dict()
rpcs = dict()
rpcs_html = ""
wait_times = []
parse_log_st = time.time()
def get_packet_name(pcode):
if pcode == 4002: return "OB_PHY_PLAN_EXECUTE"
elif pcode == 409: return "OB_SQL_GET_REQUEST"
elif pcode == 405: return "OB_SQL_SCAN_REQUEST"
else: return "OB_UNKNOWN_PACKET"
def add_sql(trace_id, ts, sqlt, waitt, rpc_list):
if qps2time.has_key(ts):
qps2time[ts] += query_ratio
else:
qps2time[ts] = 0
ud['sql_count'] += query_ratio
ud['real_sql_count'] += 1
ud['sql_time'] += sqlt
if sql_time_dist.has_key(sqlt):
sql_time_dist[sqlt] += query_ratio
else:
sql_time_dist[sqlt] = 0
ud['wait_time'] += waitt
wait_times.append(waitt)
for rpc in rpc_list:
if rpcs.has_key(rpc['pcode']):
rpcs[rpc['pcode']]['rpc_time'] += rpc['latency']
rpcs[rpc['pcode']]['rpc_times'].append(rpc['latency'])
else:
rpcs[rpc['pcode']] = dict(rpc_time = rpc['latency'], rpc_times = [rpc['latency']])
with open(sub2(profile_log, ob)) as f:
for l in f:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(6))
trace_id = m.group(1)
ts = m.group(6)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4)))]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
else:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(8))
trace_id = m.group(1)
ts = m.group(8)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))),
dict(pcode = int(m.group(7)), latency = int(m.group(6))),]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
else:
m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
if m is not None:
end_time = int(m.group(10))
trace_id = m.group(1)
ts = m.group(10)[:-6]
sql_time = int(m.group(2))
wait_time = int(m.group(3))
rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))),
dict(pcode = int(m.group(7)), latency = int(m.group(6))),
dict(pcode = int(m.group(9)), latency = int(m.group(8))),]
add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
if start_time is None and end_time is not None:
start_time = end_time
info("parsing log costs %ss" % (time.time() - parse_log_st))
sql_time = ud['sql_time']
sql_count = ud['sql_count']
real_sql_count = ud['real_sql_count']
wait_time = ud['wait_time']
drawing_st = time.time()
if end_time is None:
elapsed_seconds = 0
qps = 0
avg_sql_time = 0
avg_wait_time = 0
for pcode, rpc in rpcs.items():
rpc['avg_rpc_time'] = 0
else:
elapsed_seconds = (end_time - start_time) / 10**6
if elapsed_seconds > 0:
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / real_sql_count
avg_wait_time = float(wait_time) / real_sql_count
else:
qps = 0
avg_sql_time = 0
avg_wait_time = 0
for pcode, rpc in rpcs.items():
rpc['avg_rpc_time'] = float(rpc['rpc_time']) / len(rpc['rpc_times'])
if plt is not None:
plt.plot([x for k,x in sorted(qps2time.items())], '-')
plt.xlabel('Timeline')
plt.ylabel('QPS')
plt.savefig(sub2("%s/${role}_${ip}_qps.png" % (perf_result_dir), ob))
plt.clf()
plt.bar(sql_time_dist.keys(), sql_time_dist.values())
plt.xlabel('Response Time (us)')
plt.ylabel('Number of Requests')
plt.savefig(sub2("%s/${role}_${ip}_total_time_dist.png" % (perf_result_dir), ob))
plt.clf()
plt.plot(wait_times, ',')
plt.xlabel('Timeline')
plt.ylabel('Wait Time in Mergeserver Queue (us)')
plt.savefig(sub2("%s/${role}_${ip}_queue_time.png" % (perf_result_dir), ob))
plt.clf()
for pcode, rpc in rpcs.items():
plt.plot(rpc['rpc_times'], ',')
plt.xlabel('Timeline')
plt.ylabel('Response Time (us)')
plt.savefig(sub2("%s/${role}_${ip}_%s.png" % (perf_result_dir, pcode), ob))
plt.clf()
            rpcs_html += sub2("""<p>$role $ip %s(%s) request count: %s, average latency: %sus<br><img src="${role}_${ip}_%s.png" /></p>"""
                              % (pcode, get_packet_name(pcode), len(rpc['rpc_times']), rpc['avg_rpc_time'], pcode), ob)
info("drawing performance graph costs %ss" % (time.time() - drawing_st))
parse_perf = sub2(sub2("""
<p> ${role} ${ip} test duration: ${elapsed_seconds}s<br>
${role} ${ip} SQL request count: $sql_count<br>
${role} ${ip} mergeserver QPS: $qps</p>
<p>${role} ${ip} QPS:<br>
<img src="${role}_${ip}_qps.png" /></p>
<p>${role} ${ip} average response latency: ${avg_sql_time}us<br>
<img src="${role}_${ip}_total_time_dist.png" /></p>
<p>${role} ${ip} average time spent in the mergeserver queue: ${avg_wait_time}us<br>
<img src="${role}_${ip}_queue_time.png" /></p>
$rpcs_html
""", locals()), ob)
return dict(
parse_res = parse_perf,
stat = dict(
sql_count = sql_count,
real_sql_count = real_sql_count,
elapsed_seconds = elapsed_seconds,
sql_time = sql_time,
wait_time = wait_time,
qps2time = qps2time,
sql_time_dist = sql_time_dist,
wait_times = wait_times,
rpcs = rpcs,
)
)
def collect_perf(*args, **ob):
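    """Collect gperftools outputs and profile logs from all servers, aggregate
    cluster-wide statistics, draw the cluster plots and write the HTML
    performance report."""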
def get_server_list(server_role):
server_list = get_match_child(ob, server_role)
server_list_str = ' '.join('${%s.ip}:${%s.port}'%(k, k) for k in server_list.keys())
return server_list_str
perf_result_dir = "/home/yanran.hfs/public_html/ob_perf/not_published/%s" % (running_ts)
os.mkdir(perf_result_dir)
os.mkdir(local_tmp_dir)
ms_profile = '%s/$role.$ip.profile' % (local_tmp_dir)
all_do(ob, 'mergeserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'chunkserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'updateserver', 'save_gprofile', local_tmp_dir)
all_do(ob, 'mergeserver', 'save_profile', ms_profile)
#call_(ob, 'ms0.save_periodic_pstack', perf_result_dir)
#call_(ob, 'cs0.save_periodic_pstack', perf_result_dir)
#call_(ob, 'ups0.save_periodic_pstack', perf_result_dir)
ms_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ms0.gprofiler_output"), '/')[-1])
cs_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "cs0.gprofiler_output"), '/')[-1])
ups_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ups0.gprofiler_output"), '/')[-1])
ms_gprof = all_do(ob, 'mergeserver', 'pprof_gprofile_output', "bin/mergeserver", ms_gprofile_output, perf_result_dir)
cs_gprof = all_do(ob, 'chunkserver', 'pprof_gprofile_output', "bin/chunkserver", cs_gprofile_output, perf_result_dir)
ups_gprof = all_do(ob, 'updateserver', 'pprof_gprofile_output', "bin/updateserver", ups_gprofile_output, perf_result_dir)
ms_gprof = ''.join([x[1] for x in ms_gprof])
cs_gprof = ''.join([x[1] for x in cs_gprof])
ups_gprof = ''.join([x[1] for x in ups_gprof])
ms_prof = all_do(ob, 'mergeserver', 'parse_profile_log', ms_profile, perf_result_dir)
ms_prof_htmls = ''.join([x[1]['parse_res'] for x in ms_prof])
sql_count = 0
real_sql_count = 0
elapsed_seconds = 0
sql_time = 0
wait_time = 0
qps2time = None
sql_time_dist = None
for ms_tuple in ms_prof:
ms = ms_tuple[1]['stat']
if elapsed_seconds < ms['elapsed_seconds']:
elapsed_seconds = ms['elapsed_seconds']
sql_count += ms['sql_count']
real_sql_count += ms['real_sql_count']
sql_time += ms['sql_time']
wait_time += ms['wait_time']
qps2time = dict_add(qps2time, ms['qps2time'])
sql_time_dist = dict_add(sql_time_dist, ms['sql_time_dist'])
if elapsed_seconds == 0:
qps = 0
avg_sql_time = 0
else:
qps = sql_count / elapsed_seconds
avg_sql_time = float(sql_time) / real_sql_count
if plt is not None:
plt.plot([x for k,x in sorted(qps2time.items())], '-')
plt.xlabel('Timeline')
plt.ylabel('QPS')
plt.savefig("%s/cluster_qps.png" % (perf_result_dir))
plt.clf()
plt.bar(sql_time_dist.keys(), sql_time_dist.values())
plt.xlabel('Response Time (us)')
plt.ylabel('Number of Requests')
plt.savefig("%s/cluster_total_time_dist.png" % (perf_result_dir))
plt.clf()
user_name = find_attr(ob, "usr")
case_name = find_attr(ob, "case")
rs_list = get_server_list("rootserver")
ups_list = get_server_list("updateserver")
ms_list = get_server_list("mergeserver")
cs_list = get_server_list("chunkserver")
server_stats = ""
for ip in get_cluster_ips(*args, **ob):
sh(sub2('scp $usr@%s:$dir/server_stats %s/' % (ip, local_tmp_dir), ob))
server_stats += "<p>%s监控信息:<pre>%s</pre></p>" % (ip, read("%s/server_stats" % (local_tmp_dir)))
result_html_template = ("""
<p>Tester: ${user_name}<br>
Test case: ${case_name}<br>
Environment:
<ul>
<li>RootServer: ${rs_list}</li>
<li>UpdateServer: ${ups_list}</li>
<li>MergeServer: ${ms_list}</li>
<li>ChunkServer: ${cs_list}</li>
</ul>
Test duration: ${elapsed_seconds}s<br>
SQL request count: $sql_count<br>
Cluster QPS: $qps</p>
<p>QPS:<br>
<img src="cluster_qps.png" /></p>
<p>Average response latency: ${avg_sql_time}us<br>
<img src="cluster_total_time_dist.png" /></p>
${ms_prof_htmls}
${ms_gprof}
${cs_gprof}
${ups_gprof}
${server_stats}
<p>Server stack traces:
<ul>
<li><a href="ms0">ms0</a></li>
<li><a href="cs0">cs0</a></li>
<li><a href="ups0">ups0</a></li>
</ul></p>
""")
all_vars = copy.copy(ob)
all_vars.update(locals())
with open("%s/index.html" % (perf_result_dir), "w") as f:
f.write(sub2(result_html_template, all_vars))
with open("/home/yanran.hfs/public_html/ob_perf/not_published/index.html", "a") as f:
f.write("""<li><a href="%s/">%s %s %s</a></li>\n""" % (running_ts, running_ts, user_name, case_name))
sh("rm -r %s" % (local_tmp_dir))
return locals()
def perf_environ_setup():
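    # configure CPU profiling for every server: preload libprofiler_helper.so,
    # direct its output to a per-ip/per-role file and take the sample
    # interval from $ptime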
#ob['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['gprofiler_output'] = "$dir/$ip.gprofiler.output"
#ob['environ_extras'] = "PROFILE_SAMPLE_INTERVAL=$ptime"
#ob['ms0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['ms0']['gprofiler_output'] = "$dir/ms0.gprofiler.output"
#ob['ms0']['environ_extras'] = sub2("PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime", ob)
#ob['cs0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['cs0']['gprofiler_output'] = "$dir/cs0.gprofiler.output"
#ob['cs0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
#ob['ups0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#ob['ups0']['gprofiler_output'] = "$dir/ups0.gprofiler.output"
#ob['ups0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
#obi_vars.update(dict(environ_extras = "PROFILE_SAMPLE_INTERVAL=$ptime"))
obi_vars['gprofiler_output'] = "$dir/$ip.$role.gprofiler.output"
obi_vars['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime"
obi_vars['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
#call_(ob, 'ms0.kill', '-50')
def perf_install():
client_vars.update(dict_filter_out_special_attrs(perf_client_attrs()))
ct_vars.update(dict_filter_out_special_attrs(perf_ct_attrs()))
role_vars.update(dict_filter_out_special_attrs(perf_role_attrs()))
obi_vars.update(dict_filter_out_special_attrs(perf_obi_attrs()))
perf_environ_setup()
perf_install()
| gpl-2.0 |
kmather73/ggplot | ggplot/tests/test_faceting.py | 12 | 1894 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup
assert_same_ggplot = get_assert_same_ggplot(__file__)
from ggplot import *
import numpy as np
import pandas as pd
def _build_testing_df():
df = pd.DataFrame({
"x": np.arange(0, 10),
"y": np.arange(0, 10),
"z": np.arange(0, 10),
"a": [1,1,1,1,1,2,2,2,3,3]
})
df['facets'] = np.where(df.x > 4, 'over', 'under')
df['facets2'] = np.where((df.x % 2) == 0, 'even', 'uneven')
return df
def _build_small_df():
return pd.DataFrame({
"x": [1, 2, 1, 2],
"y": [1, 2, 3, 4],
"a": ["a", "b", "a", "b"],
"b": ["c", "c", "d", "d"]
})
# faceting with bar plots does not work yet: see https://github.com/yhat/ggplot/issues/196
#@cleanup
#def test_facet_grid_descrete():
# df = _build_testing_df()
# gg = ggplot(aes(x='a'), data=df)
# assert_same_ggplot(gg + geom_bar() + facet_grid(x="facets", y="facets2"),
# "faceting_grid_descrete")
#@cleanup
#def test_facet_wrap_descrete():
# df = _build_testing_df()
# gg = ggplot(aes(x='a'), data=df)
# assert_same_ggplot(gg + geom_bar() + facet_wrap(x="facets"), "faceting_wrap_descrete")
@cleanup
def test_facet_grid_continous():
df = _build_testing_df()
p = ggplot(aes(x='x', y='y', colour='z'), data=df)
p = p + geom_point() + scale_colour_gradient(low="blue", high="red")
p = p + facet_grid("facets", "facets2")
assert_same_ggplot(p, "faceting_grid_continous")
@cleanup
def test_facet_wrap_continous():
df = _build_testing_df()
p = ggplot(aes(x='x', y='y', colour='z'), data=df)
p = p + geom_point() + scale_colour_gradient(low="blue", high="red")
p = p + facet_wrap("facets")
assert_same_ggplot(p, "faceting_wrap_continous")
| bsd-2-clause |
amaurywalbert/twitter | statistics/histograms/n9_egos_histograms.py | 1 | 6593 | # -*- coding: latin1 -*-
################################################################################################
#
#
import tweepy, datetime, sys, time, json, os, os.path, shutil, time, struct, random
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pylab
import numpy as np
import powerlaw
import seaborn as sns
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
reload(sys)
sys.setdefaultencoding('utf-8')
######################################################################################################################################################################
## Status - Version 1 - Generates histograms for the N9 network - followers of the ego
##
######################################################################################################################################################################
################################################################################################
# Reads the binary files containing the follower ids
################################################################################################
def read_arq_bin(file):
with open(file, 'r') as f:
f.seek(0,2)
tamanho = f.tell()
f.seek(0)
followers_list = []
while f.tell() < tamanho:
buffer = f.read(user_struct.size)
friend = user_struct.unpack(buffer)
followers_list.append(friend[0])
return followers_list
######################################################################################################################################################################
# HTML
######################################################################################################################################################################
def dynamic_histogram(data):
print ("Criando histograma dinâmico...")
normalized = [go.Histogram(x=data,marker=dict(color='black'))]
plotly.offline.plot(normalized, filename=output_dir_html+"followers_hist_k_"+str(k)+".html")
print ("OK")
print
######################################################################################################################################################################
# Histogram
######################################################################################################################################################################
def histogram(data):
print ("Criando histograma...")
plt.hist(data,bins=bins,label="k = "+str(k)+" - "+str(len(data))+" egos",color='black')
plt.xlabel ("Followers")
plt.ylabel ("Egos")
plt.title ("Rede de Seguidores - Número de seguidores por ego")
plt.legend(loc='best')
plt.savefig(output_dir+"followers_hist_k_"+str(k)+".png")
plt.close()
plt.hist(data,bins=bins,label="k = "+str(k)+" - "+str(len(data))+" egos",color='black')
plt.xlabel ("Followers")
plt.xlim([0, axis_x_limit])
plt.ylabel ("Egos")
plt.title ("Rede de Seguidores - Número de seguidores por ego")
plt.legend(loc='best')
plt.savefig(output_dir_zoom+"followers_hist_k_"+str(k)+".png")
plt.close()
print ("OK!")
print
######################################################################################################################################################################
######################################################################################################################################################################
#
# Main method of the program.
#
######################################################################################################################################################################
######################################################################################################################################################################
def main():
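    """Load the intersection (ego) list for the current threshold k, read each
    ego's binary follower file, collect the follower counts and draw the static
    and interactive histograms."""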
n_followers=[]
print ("Preparando dados...")
with open(input_file, 'r') as infile:
intersection = json.load(infile)
for user in intersection:
        followers_list = read_arq_bin(data_dir+str(user)+".dat") # read the ego's binary file back into a list of follower ids
        n_followers.append(len(followers_list))
    print ("Total ego users: "+str(len(n_followers)))
print ("OK!")
print
histogram(n_followers)
dynamic_histogram(n_followers)
#####################################################################################################################################################################
#
# START OF THE PROGRAM
#
######################################################################################################################################################################
################################### CONFIGURE THE FOLLOWING LINES ####################################################
######################################################################################################################
qtde_egos = 'full_with_prunned' ########################################## 10, 50, 100, 500, full or full_with_prunned
bins=1500 ################################################################# Number of bars in the histogram
axis_x_limit = 100000 ################################################### Limit for the x axis (zoom)
formato = 'l' ################################################### long type for the follower id
user_struct = struct.Struct(formato) ###################################### Initializes the struct object so the specific format can be stored in the binary file
######################################################################################################################
threshold = [0,10,20,30,40,50,100,200]
for i in range(len(threshold)):
k = threshold[i]
print ("Gerando gráficos com k = "+str(k))
input_file = "/home/amaury/coleta/subconjunto/"+str(qtde_egos)+"/intersection_k_"+str(k)+".txt"
data_dir = "/home/amaury/coleta/n9/egos_followers/"+str(qtde_egos)+"/bin/"
output_dir = "/home/amaury/coleta/statistics/n9/"+str(qtde_egos)+"/"
output_dir_zoom = "/home/amaury/coleta/statistics/n9/"+str(qtde_egos)+"/zoom/"
output_dir_html = "/home/amaury/coleta/statistics/n9/"+str(qtde_egos)+"/html/"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir_zoom):
os.makedirs(output_dir_zoom)
if not os.path.exists(output_dir_html):
os.makedirs(output_dir_html)
# Run the main method
if __name__ == "__main__": main()
print("######################################################################")
print("Script finalizado!")
print("######################################################################\n") | gpl-3.0 |
leggitta/mne-python | examples/stats/plot_cluster_stats_evoked.py | 18 | 2991 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
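# cluster-forming threshold applied to the F statistic (one-sided test, tail=1)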
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
linebp/pandas | pandas/tests/indexes/period/test_partial_slicing.py | 19 | 5909 | import pytest
import numpy as np
import pandas as pd
from pandas.util import testing as tm
from pandas import (Series, period_range, DatetimeIndex, PeriodIndex,
DataFrame, _np_version_under1p12, Period)
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(ts[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
tm.assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1],
SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
tm.assert_raises_regex(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
assert idx.name == idx[1:].name
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
tm.assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
tm.assert_series_equal(res, exp)
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
# changed to TypeError in 1.12
# https://github.com/numpy/numpy/pull/6271
exc = IndexError if _np_version_under1p12 else TypeError
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
with pytest.raises(exc):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/02':], s[1:])
tm.assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
tm.assert_series_equal(s['2013/02':], s[31:])
tm.assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with pytest.raises(exc):
idx[v:]
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
# changed to TypeError in 1.12
# https://github.com/numpy/numpy/pull/6271
exc = IndexError if _np_version_under1p12 else TypeError
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
with pytest.raises(exc):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'],
s[300:660])
tm.assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'],
s[3600:3960])
tm.assert_series_equal(s['2013/01/01 10H':], s[3600:])
tm.assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'),
columns=['units'])
empty['units'] = empty['units'].astype('int64')
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
| bsd-3-clause |
dhwang99/statistics_introduction | hypothetical_test/test_contain.py | 1 | 2067 | #encoding: utf8
import numpy as np
from scipy.misc import comb
from scipy.stats import norm
import matplotlib.pyplot as plt
import pdb
'''
The required sample size depends on alpha, beta and delta; delta is usually taken to be one sigma.
err1 = alpha = 0.1
err2 = beta = 0.2
Normal-model sample size, chosen to control the type II error.
delta defaults to one standard deviation.
Derivation of the threshold c and of n:
Phi((c - mu0)*sqrt(n)/sigma) <= 1 - alpha
c <= Phi^{-1}(1-alpha) * sigma/sqrt(n) + mu0
ds = delta / sigma  # fixed once delta and sigma are given; introduced only to simplify later steps
mu = mu0 + delta
Phi((c - mu)*sqrt(n)/sigma) < beta
Phi((c - mu0 - delta)*sqrt(n)/sigma) < beta
(c - mu0 - delta)*sqrt(n)/sigma < Phi^{-1}(beta) = -Phi^{-1}(1-beta) = -Zbeta
# taking the largest admissible c, c = Za*sigma/sqrt(n) + mu0, gives
(Za * sigma/sqrt(n) + mu0 - mu0 - delta)*sqrt(n)/sigma < -Zbeta
Za - delta*sqrt(n)/sigma < -Zbeta
sqrt(n) > (Za + Zbeta) * sigma/delta
'''
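# Worked example (alpha=0.1, beta=0.2, delta=sigma): Za = norm.ppf(0.9) ~ 1.2816,
# Zb = norm.ppf(0.8) ~ 0.8416, so sqrt(n) > 2.1232 and n = ceil(2.1232**2) = 5.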
def norm_sample_contain(sigma, delta, max_alpha=0.1, max_beta=0.2):
    if delta is None:
delta = sigma
Za = norm.ppf(1-max_alpha)
Zb = norm.ppf(1-max_beta)
min_sqrt_n = (Za + Zb) * sigma / delta
n = np.ceil(min_sqrt_n ** 2)
return n
'''
Compute the p-value under the normal distribution.
'''
def p_value():
return None
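# p_value() is still a stub; one possible one-sided z-test p-value (a sketch,
# not part of the original script) would be 1 - norm.cdf((xbar - mu0) * np.sqrt(n) / sigma).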
if __name__ == "__main__":
colors = ['g', 'b', 'k']
    # test the required sample size
mu0 = 0
sigma = 1.
betas = np.linspace(0.01, 0.3, num=50)
contains = np.zeros(len(betas))
for i in xrange(len(betas)):
beta = betas[i]
n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.1, max_beta=beta)
contains[i] = n
plt.clf()
plt.plot(betas, contains, color='r')
print "betas:", betas
print "n:", contains
for i in xrange(len(betas)):
beta = betas[i]
n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.05, max_beta=beta)
contains[i] = n
plt.plot(betas, contains, color='k')
print "betas:", betas
print "n:", contains
plt.savefig('images/norm_contain.png', format='png')
| gpl-3.0 |
wzbozon/scikit-learn | benchmarks/bench_covertype.py | 120 | 7381 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
jat255/hyperspy | hyperspy/utils/peakfinders2D.py | 1 | 19791 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import scipy.ndimage as ndi
from numba import jit
from skimage.feature import (
peak_local_max, blob_dog, blob_log, corner_peaks, match_template)
import copy
NO_PEAKS = np.array([[np.nan, np.nan]])
@jit(nopython=True, cache=True)
def _fast_mean(X):
"""JIT-compiled mean of array.
Parameters
----------
X : numpy.ndarray
Input array.
Returns
-------
mean : float
Mean of X.
Notes
-----
Used by scipy.ndimage.generic_filter in the find_peaks_stat
method to reduce overhead of repeated Python function calls.
See https://github.com/scipy/scipy/issues/8916 for more details.
"""
return np.mean(X)
@jit(nopython=True, cache=True)
def _fast_std(X):
"""JIT-compiled standard deviation of array.
Parameters
----------
X : numpy.ndarray
Input array.
Returns
-------
std : float
Standard deviation of X.
Notes
-----
Used by scipy.ndimage.generic_filter in the find_peaks_stat
method to reduce overhead of repeated Python function calls.
See https://github.com/scipy/scipy/issues/8916 for more details.
"""
return np.std(X)
def clean_peaks(peaks):
"""Sort array of peaks and deal with no peaks being found.
Parameters
----------
peaks : numpy.ndarray
Array of found peaks.
Returns
-------
peaks : numpy.ndarray
Sorted array, first by `peaks[:,1]` (y-coordinate) then by `peaks[:,0]`
(x-coordinate), of found peaks.
NO_PEAKS : str
Flag indicating no peaks found.
"""
if len(peaks) == 0:
return NO_PEAKS
else:
ind = np.lexsort((peaks[:,0], peaks[:,1]))
return peaks[ind]
def find_local_max(z, **kwargs):
"""Method to locate positive peaks in an image by local maximum searching.
This function wraps :py:func:`skimage.feature.peak_local_max` function and
sorts the results for consistency with other peak finding methods.
    Parameters
    ----------
    z : numpy.ndarray
Array of image intensities.
**kwargs
Keyword arguments to be passed to the ``peak_local_max`` method of
the ``scikit-image`` library. See its documentation for details:
http://scikit-image.org/docs/dev/api/skimage.feature.html#peak-local-max
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
"""
peaks = peak_local_max(z, **kwargs)
return clean_peaks(peaks)
def find_peaks_minmax(z, distance=5., threshold=10.):
"""Method to locate the positive peaks in an image by comparing maximum
and minimum filtered images.
Parameters
----------
z : numpy.ndarray
Matrix of image intensities.
distance : float
Expected distance between peaks.
threshold : float
Minimum difference between maximum and minimum filtered images.
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
"""
data_max = ndi.filters.maximum_filter(z, distance)
maxima = (z == data_max)
data_min = ndi.filters.minimum_filter(z, distance)
diff = ((data_max - data_min) > threshold)
maxima[diff == 0] = 0
labeled, num_objects = ndi.label(maxima)
peaks = np.array(
ndi.center_of_mass(z, labeled, range(1, num_objects + 1)))
return clean_peaks(np.round(peaks).astype(int))
def find_peaks_max(z, alpha=3., distance=10):
"""Method to locate positive peaks in an image by local maximum searching.
Parameters
----------
alpha : float
Only maxima above `alpha * sigma` are found, where `sigma` is the
standard deviation of the image.
distance : int
When a peak is found, all pixels in a square region of side
`2 * distance` are set to zero so that no further peaks can be found
in that region.
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
"""
# preallocate lots of peak storage
k_arr = []
# copy image
image_temp = copy.deepcopy(z)
peak_ct = 0
# calculate standard deviation of image for thresholding
sigma = np.std(z)
while True:
k = np.argmax(image_temp)
j, i = np.unravel_index(k, image_temp.shape)
if image_temp[j, i] >= alpha * sigma:
k_arr.append([j, i])
# masks peaks already identified.
x = np.arange(i - distance, i + distance)
y = np.arange(j - distance, j + distance)
xv, yv = np.meshgrid(x, y)
# clip to handle peaks near image edge
image_temp[yv.clip(0, image_temp.shape[0] - 1),
xv.clip(0, image_temp.shape[1] - 1)] = 0
peak_ct += 1
else:
break
peaks = np.array(k_arr)
return clean_peaks(peaks)
def find_peaks_zaefferer(z, grad_threshold=0.1, window_size=40,
distance_cutoff=50.):
"""Method to locate positive peaks in an image based on gradient
thresholding and subsequent refinement within masked regions.
Parameters
----------
z : ndarray
Matrix of image intensities.
grad_threshold : float
The minimum gradient required to begin a peak search.
window_size : int
The size of the square window within which a peak search is
conducted. If odd, will round down to even. The size must be larger
than 2.
distance_cutoff : float
The maximum distance a peak may be from the initial
high-gradient point.
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
Notes
-----
Implemented as described in Zaefferer "New developments of computer-aided
crystallographic analysis in transmission electron microscopy" J. Ap. Cryst.
This version by Ben Martineau (2016)
"""
def box(x, y, window_size, x_max, y_max):
"""Produces a list of coordinates in the box about (x, y)."""
a = int(window_size / 2)
x_min = max(0, x - a)
x_max = min(x_max, x + a)
y_min = max(0, y - a)
y_max = min(y_max, y + a)
return np.mgrid[x_min:x_max, y_min:y_max].reshape(2, -1, order="F")
def get_max(image, box):
"""Finds the coordinates of the maximum of 'image' in 'box'."""
vals = image[tuple(box)]
ind = np.argmax(vals)
return tuple(box[:, ind])
def squared_distance(x, y):
"""Calculates the squared distance between two points."""
return (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2
def gradient(image):
"""Calculates the square of the 2-d partial gradient.
Parameters
----------
image : numpy.ndarray
The image for which the gradient will be calculated.
Returns
-------
gradient_of_image : numpy.ndarray
The gradient of the image.
"""
gradient_of_image = np.gradient(image)
gradient_of_image = gradient_of_image[0] ** 2 + gradient_of_image[
1] ** 2
return gradient_of_image
# Check window size is appropriate.
if window_size < 2:
raise ValueError("`window_size` must be >= 2.")
# Generate an ordered list of matrix coordinates.
if len(z.shape) != 2:
raise ValueError("'z' should be a 2-d image matrix.")
z = z / np.max(z)
coordinates = np.indices(z.data.shape).reshape(2, -1).T
# Calculate the gradient at every point.
image_gradient = gradient(z)
# Boolean matrix of high-gradient points.
coordinates = coordinates[(image_gradient >= grad_threshold).flatten()]
# Compare against squared distance (avoids repeated sqrt calls)
distance_cutoff_sq = distance_cutoff ** 2
peaks = []
for coordinate in coordinates:
# Iterate over coordinates where the gradient is high enough.
b = box(coordinate[0], coordinate[1], window_size, z.shape[0],
z.shape[1])
p_old = (0, 0)
p_new = get_max(z, b)
while p_old[0] != p_new[0] and p_old[1] != p_new[1]:
p_old = p_new
b = box(p_old[0], p_old[1], window_size, z.shape[0], z.shape[1])
p_new = get_max(z, b)
if squared_distance(coordinate, p_new) > distance_cutoff_sq:
break
peaks.append(p_new)
peaks = np.array([p for p in set(peaks)])
return clean_peaks(peaks)
def find_peaks_stat(z, alpha=1.0, window_radius=10, convergence_ratio=0.05):
"""Method to locate positive peaks in an image based on statistical
refinement and difference with respect to mean intensity.
Parameters
----------
z : numpy.ndarray
Array of image intensities.
alpha : float
Only maxima above `alpha * sigma` are found, where `sigma` is the
local, rolling standard deviation of the image.
window_radius : int
The pixel radius of the circular window for the calculation of the
rolling mean and standard deviation.
convergence_ratio : float
The algorithm will stop finding peaks when the proportion of new peaks
being found is less than `convergence_ratio`.
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
Notes
-----
Implemented as described in the PhD thesis of Thomas White, University of
Cambridge, 2009, with minor modifications to resolve ambiguities.
The algorithm is as follows:
1. Adjust the contrast and intensity bias of the image so that all pixels
have values between 0 and 1.
2. For each pixel, determine the mean and standard deviation of all pixels
inside a circle of radius 10 pixels centered on that pixel.
3. If the value of the pixel is greater than the mean of the pixels in the
circle by more than one standard deviation, set that pixel to have an
intensity of 1. Otherwise, set the intensity to 0.
4. Smooth the image by convovling it twice with a flat 3x3 kernel.
5. Let k = (1/2 - mu)/sigma where mu and sigma are the mean and standard
deviations of all the pixel intensities in the image.
6. For each pixel in the image, if the value of the pixel is greater than
mu + k*sigma set that pixel to have an intensity of 1. Otherwise, set the
intensity to 0.
7. Detect peaks in the image by locating the centers of gravity of regions
of adjacent pixels with a value of 1.
8. Repeat #4-7 until the number of peaks found in the previous step
converges to within the user defined convergence_ratio.
"""
try:
from sklearn.cluster import DBSCAN
except ImportError:
raise ImportError("This method requires scikit-learn.")
def normalize(image):
"""Scales the image to intensities between 0 and 1."""
return image / np.max(image)
def _local_stat(image, radius, func):
"""Calculates rolling method 'func' over a circular kernel."""
x, y = np.ogrid[-radius : radius + 1, -radius : radius + 1]
kernel = np.hypot(x, y) < radius
stat = ndi.filters.generic_filter(image, func, footprint=kernel)
return stat
def local_mean(image, radius):
"""Calculates rolling mean over a circular kernel."""
return _local_stat(image, radius, _fast_mean)
def local_std(image, radius):
"""Calculates rolling standard deviation over a circular kernel."""
return _local_stat(image, radius, _fast_std)
def single_pixel_desensitize(image):
"""Reduces single-pixel anomalies by nearest-neighbor smoothing."""
kernel = np.array([[0.5, 1, 0.5], [1, 1, 1], [0.5, 1, 0.5]])
smoothed_image = ndi.filters.generic_filter(image, _fast_mean, footprint=kernel)
return smoothed_image
def stat_binarise(image):
"""Peaks more than one standard deviation from the mean set to one."""
image_rolling_mean = local_mean(image, window_radius)
image_rolling_std = local_std(image, window_radius)
image = single_pixel_desensitize(image)
binarised_image = np.zeros(image.shape)
stat_mask = image > (image_rolling_mean + alpha * image_rolling_std)
binarised_image[stat_mask] = 1
return binarised_image
def smooth(image):
"""Image convolved twice using a uniform 3x3 kernel."""
image = ndi.filters.uniform_filter(image, size=3)
image = ndi.filters.uniform_filter(image, size=3)
return image
def half_binarise(image):
"""Image binarised about values of one-half intensity."""
binarised_image = np.where(image > 0.5, 1, 0)
return binarised_image
def separate_peaks(binarised_image):
"""Identify adjacent 'on' coordinates via DBSCAN."""
bi = binarised_image.astype("bool")
coordinates = np.indices(bi.shape).reshape(2, -1).T[bi.flatten()]
db = DBSCAN(2, 3)
peaks = []
if coordinates.shape[0] > 0: # we have at least some peaks
labeled_points = db.fit_predict(coordinates)
for peak_label in list(set(labeled_points)):
peaks.append(coordinates[labeled_points == peak_label])
return peaks
def _peak_find_once(image):
"""Smooth, binarise, and find peaks according to main algorithm."""
image = smooth(image) # 4
image = half_binarise(image) # 5
peaks = separate_peaks(image) # 6
centers = np.array([np.mean(peak, axis=0) for peak in peaks]) # 7
return image, centers
def stat_peak_finder(image, convergence_ratio):
"""Find peaks in image. Algorithm stages in comments."""
# Image preparation
image = normalize(image) # 1
image = stat_binarise(image) # 2, 3
# Perform first iteration of peak finding
image, peaks_curr = _peak_find_once(image) # 4-7
n_peaks = len(peaks_curr)
if n_peaks == 0:
return peaks_curr
m_peaks = 0
# Repeat peak finding with more blurring to convergence
while (n_peaks - m_peaks) / n_peaks > convergence_ratio: # 8
m_peaks = n_peaks
peaks_old = np.copy(peaks_curr)
image, peaks_curr = _peak_find_once(image)
n_peaks = len(peaks_curr)
if n_peaks == 0:
return peaks_old
return peaks_curr
return clean_peaks(stat_peak_finder(z, convergence_ratio))
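# Usage sketch (not part of the original module): for a 2-d image `z`,
# `find_peaks_stat(z)` returns an (n_peaks, 2) array of peak pixel coordinates
# (or NO_PEAKS when nothing is found), matching the other finders above; it
# requires scikit-learn for the DBSCAN clustering step.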
def find_peaks_dog(z, min_sigma=1., max_sigma=50., sigma_ratio=1.6,
threshold=0.2, overlap=0.5, exclude_border=False):
"""Method to locate peaks via the Difference of Gaussian Matrices method.
This function wraps :py:func:`skimage.feature.blob_dog` function and
sorts the results for consistency with other peak finding methods.
Parameters
----------
z : numpy.ndarray
2-d array of intensities
min_sigma, max_sigma, sigma_ratio, threshold, overlap, exclude_border :
Additional parameters to be passed to the algorithm. See `blob_dog`
documentation for details:
http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-dog
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
Notes
-----
While highly effective at finding even very faint peaks, this method is
sensitive to fluctuations in intensity near the edges of the image.
"""
z = z / np.max(z)
blobs = blob_dog(z, min_sigma=min_sigma, max_sigma=max_sigma,
sigma_ratio=sigma_ratio, threshold=threshold,
overlap=overlap, exclude_border=exclude_border)
try:
centers = np.round(blobs[:, :2]).astype(int)
except IndexError:
return NO_PEAKS
clean_centers = []
for center in centers:
if len(np.intersect1d(center, (0, 1) + z.shape + tuple(
c - 1 for c in z.shape))) > 0:
continue
clean_centers.append(center)
peaks = np.array(clean_centers)
ind = np.lexsort((peaks[:,0], peaks[:,1]))
return peaks[ind]
def find_peaks_log(z, min_sigma=1., max_sigma=50., num_sigma=10,
threshold=0.2, overlap=0.5, log_scale=False,
exclude_border=False):
"""Method to locate peaks via the Laplacian of Gaussian Matrices method.
This function wraps :py:func:`skimage.feature.blob_log` function and
sorts the results for consistency with other peak finding methods.
Parameters
----------
z : numpy.ndarray
Array of image intensities.
min_sigma, max_sigma, num_sigma, threshold, overlap, log_scale, exclude_border :
Additional parameters to be passed to the ``blob_log`` method of the
``scikit-image`` library. See its documentation for details:
http://scikit-image.org/docs/dev/api/skimage.feature.html#blob-log
Returns
-------
peaks : numpy.ndarray
(n_peaks, 2)
Peak pixel coordinates.
"""
z = z / np.max(z)
if isinstance(num_sigma, float):
raise ValueError("`num_sigma` parameter should be an integer.")
blobs = blob_log(z, min_sigma=min_sigma, max_sigma=max_sigma,
num_sigma=num_sigma, threshold=threshold, overlap=overlap,
log_scale=log_scale, exclude_border=exclude_border)
# Attempt to return only peak positions. If no peaks exist, return an
# empty array.
try:
centers = np.round(blobs[:, :2]).astype(int)
ind = np.lexsort((centers[:,0], centers[:,1]))
except IndexError:
return NO_PEAKS
return centers[ind]
def find_peaks_xc(z, template, distance=5, threshold=0.5, **kwargs):
"""Find peaks in the cross correlation between the image and a template by
using the :py:func:`~hyperspy.utils.peakfinders2D.find_peaks_minmax` function
to find the peaks on the cross correlation result obtained using the
:py:func:`skimage.feature.match_template` function.
Parameters
----------
z : numpy.ndarray
Array of image intensities.
template : numpy.ndarray (square)
Array containing a single bright disc, similar to those to detect.
distance : float
Expected distance between peaks.
threshold : float
Minimum difference between maximum and minimum filtered images.
**kwargs : dict
Keyword arguments to be passed to the
:py:func:`skimage.feature.match_template` function.
Returns
-------
numpy.ndarray
(n_peaks, 2)
Array of peak coordinates.
"""
pad_input = kwargs.pop('pad_input', True)
response_image = match_template(z, template, pad_input=pad_input, **kwargs)
peaks = find_peaks_minmax(response_image,
distance=distance,
threshold=threshold)
return clean_peaks(peaks)
| gpl-3.0 |
aebrahim/cobrapy | cobra/flux_analysis/double_deletion.py | 3 | 23281 | from warnings import warn
from itertools import chain, product
from six import iteritems
import numpy
from ..solvers import get_solver_name, solver_dict
from ..manipulation.delete import (find_gene_knockout_reactions,
get_compiled_gene_reaction_rules)
from .deletion_worker import CobraDeletionPool, CobraDeletionMockPool
try:
import scipy
except ImportError:
moma = None
else:
from . import moma
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
# Utility functions
def generate_matrix_indexes(ids1, ids2):
"""map an identifier to an entry in the square result matrix"""
return {id: index for index, id in enumerate(set(chain(ids1, ids2)))}
def yield_upper_tria_indexes(ids1, ids2, id_to_index):
"""gives the necessary indexes in the upper triangle
ids1 and ids2 are lists of the identifiers i.e. gene id's or reaction
indexes to be knocked out. id_to_index maps each identifier to its index
in the result matrix.
Note that this does not return indexes for the diagonal. Those have
to be computed separately."""
# sets to check for inclusion in o(1)
id_set1 = set(ids1)
id_set2 = set(ids2)
for id1, id2 in product(ids1, ids2):
# indexes in the result matrix
index1 = id_to_index[id1]
index2 = id_to_index[id2]
# upper triangle
if index2 > index1:
yield ((index1, index2), (id1, id2))
# lower triangle but would be skipped, so return in upper triangle
elif id2 not in id_set1 or id1 not in id_set2:
yield((index2, index1), (id2, id1)) # note that order flipped
def _format_upper_triangular_matrix(row_indexes, column_indexes, matrix):
"""reformat the square upper-triangular result matrix
For example, results may look like this
[[ A B C D]
[ - - - -]
[ - - E F]
[ - - - G]]
In this case, the second row was skipped. This means we have
row_indexes [0, 2, 3] and column_indexes [0, 1, 2, 3]
First, it will reflect the upper triangle into the lower triangle
[[ A B C D]
[ B - - -]
[ C - E F]
[ D - F G]]
Finally, it will remove the missing rows and return
[[ A B C D]
[ C - E F]
[ D - F G]]
"""
results = matrix.copy()
    # These select the indexes for the upper triangle. However, switching
# the order selects the lower triangle.
triu1, triu2 = numpy.triu_indices(matrix.shape[0])
# This makes reflection pretty easy
results[triu2, triu1] = results[triu1, triu2]
# Remove the missing rows and return.
return results[row_indexes, :][:, column_indexes]
def format_results_frame(row_ids, column_ids, matrix, return_frame=False):
"""format results as a pandas.DataFrame if desired/possible
Otherwise returns a dict of
{"x": row_ids, "y": column_ids", "data": result_matrx}"""
if return_frame and DataFrame:
return DataFrame(data=matrix, index=row_ids, columns=column_ids)
elif return_frame and not DataFrame:
warn("could not import pandas.DataFrame")
return {"x": row_ids, "y": column_ids, "data": matrix}
def double_deletion(cobra_model, element_list_1=None, element_list_2=None,
element_type='gene', **kwargs):
"""Wrapper for double_gene_deletion and double_reaction_deletion
.. deprecated :: 0.4
Use double_reaction_deletion and double_gene_deletion
"""
warn("deprecated - use single_reaction_deletion and single_gene_deletion")
if element_type == "reaction":
return double_reaction_deletion(cobra_model, element_list_1,
element_list_2, **kwargs)
elif element_type == "gene":
return double_gene_deletion(cobra_model, element_list_1,
element_list_2, **kwargs)
else:
raise Exception("unknown element type")
def double_reaction_deletion(cobra_model,
reaction_list1=None, reaction_list2=None,
method="fba", return_frame=False,
solver=None, zero_cutoff=1e-12,
**kwargs):
"""sequentially knocks out pairs of reactions in a model
cobra_model : :class:`~cobra.core.Model.Model`
cobra model in which to perform deletions
reaction_list1 : [:class:`~cobra.core.Reaction.Reaction`:] (or their id's)
Reactions to be deleted. These will be the rows in the result.
If not provided, all reactions will be used.
reaction_list2 : [:class:`~cobra.core.Reaction`:] (or their id's)
        Reactions to be deleted. These will be the columns in the result.
If not provided, reaction_list1 will be used.
method: "fba" or "moma"
Procedure used to predict the growth rate
solver: str for solver name
This must be a QP-capable solver for MOMA. If left unspecified,
a suitable solver will be automatically chosen.
zero_cutoff: float
When checking to see if a value is 0, this threshold is used.
return_frame: bool
If true, formats the results as a pandas.Dataframe. Otherwise
returns a dict of the form:
{"x": row_labels, "y": column_labels", "data": 2D matrix}
"""
# handle arguments which need to be passed on
if solver is None:
solver = get_solver_name(qp=(method == "moma"))
kwargs["solver"] = solver
kwargs["zero_cutoff"] = zero_cutoff
# generate other arguments
# identifiers for reactions are their indexes
if reaction_list1 is None:
reaction_indexes1 = range(len(cobra_model.reactions))
else:
reaction_indexes1 = [cobra_model.reactions.index(r)
for r in reaction_list1]
if reaction_list2 is None:
reaction_indexes2 = reaction_indexes1
else:
reaction_indexes2 = [cobra_model.reactions.index(r)
for r in reaction_list2]
reaction_to_result = generate_matrix_indexes(reaction_indexes1,
reaction_indexes2)
# Determine 0 flux reactions. If an optimal solution passes no flux
# through the deleted reactions, then we know removing them will
# not change the solution.
wt_solution = solver_dict[solver].solve(cobra_model)
if wt_solution.status == "optimal":
kwargs["wt_growth_rate"] = wt_solution.f
kwargs["no_flux_reaction_indexes"] = \
{i for i, v in enumerate(wt_solution.x) if abs(v) < zero_cutoff}
else:
warn("wild-type solution status is '%s'" % wt_solution.status)
# call the computing functions
if method == "fba":
results = _double_reaction_deletion_fba(
cobra_model, reaction_indexes1, reaction_indexes2,
reaction_to_result, **kwargs)
elif method == "moma":
results = _double_reaction_deletion_moma(
cobra_model, reaction_indexes1, reaction_indexes2,
reaction_to_result, **kwargs)
else:
raise ValueError("Unknown deletion method '%s'" % method)
# convert upper triangular matrix to full matrix
full_result = _format_upper_triangular_matrix(
[reaction_to_result[i] for i in reaction_indexes1], # row indexes
[reaction_to_result[i] for i in reaction_indexes2], # col indexes
results)
# format appropriately with labels
row_ids = [cobra_model.reactions[i].id for i in reaction_indexes1]
column_ids = [cobra_model.reactions[i].id for i in reaction_indexes2]
return format_results_frame(row_ids, column_ids,
full_result, return_frame)
def double_gene_deletion(cobra_model,
gene_list1=None, gene_list2=None,
method="fba", return_frame=False,
solver=None, zero_cutoff=1e-12,
**kwargs):
"""sequentially knocks out pairs of genes in a model
cobra_model : :class:`~cobra.core.Model.Model`
cobra model in which to perform deletions
gene_list1 : [:class:`~cobra.core.Gene.Gene`:] (or their id's)
Genes to be deleted. These will be the rows in the result.
        If not provided, all genes will be used.
    gene_list2 : [:class:`~cobra.core.Gene.Gene`:] (or their id's)
        Genes to be deleted. These will be the columns in the result.
        If not provided, gene_list1 will be used.
method: "fba" or "moma"
Procedure used to predict the growth rate
solver: str for solver name
This must be a QP-capable solver for MOMA. If left unspecified,
a suitable solver will be automatically chosen.
zero_cutoff: float
When checking to see if a value is 0, this threshold is used.
number_of_processes: int for number of processes to use.
        If unspecified, the number of parallel processes to use will be
        automatically determined. Setting this to 1 explicitly disables use
        of the multiprocessing library.
.. note:: multiprocessing is not supported with method=moma
    return_frame: bool
        If true, formats the results as a pandas.DataFrame. Otherwise
        returns a dict of the form:
        {"x": row_labels, "y": column_labels, "data": 2D matrix}
"""
# handle arguments which need to be passed on
if solver is None:
solver = get_solver_name(qp=(method == "moma"))
kwargs["solver"] = solver
kwargs["zero_cutoff"] = zero_cutoff
# generate other arguments
# identifiers for genes
if gene_list1 is None:
gene_ids1 = cobra_model.genes.list_attr("id")
else:
gene_ids1 = [str(i) for i in gene_list1]
if gene_list2 is None:
gene_ids2 = gene_ids1
else:
gene_ids2 = [str(i) for i in gene_list2]
# The gene_id_to_result dict will map each gene id to the index
# in the result matrix.
gene_id_to_result = generate_matrix_indexes(gene_ids1, gene_ids2)
# Determine 0 flux reactions. If an optimal solution passes no flux
# through the deleted reactions, then we know removing them will
# not change the solution.
wt_solution = solver_dict[solver].solve(cobra_model)
if wt_solution.status == "optimal":
kwargs["wt_growth_rate"] = wt_solution.f
kwargs["no_flux_reaction_indexes"] = \
{i for i, v in enumerate(wt_solution.x) if abs(v) < zero_cutoff}
else:
warn("wild-type solution status is '%s'" % wt_solution.status)
if method == "fba":
result = _double_gene_deletion_fba(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, **kwargs)
elif method == "moma":
result = _double_gene_deletion_moma(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, **kwargs)
else:
raise ValueError("Unknown deletion method '%s'" % method)
# convert upper triangular matrix to full matrix
full_result = _format_upper_triangular_matrix(
[gene_id_to_result[id] for id in gene_ids1], # row indexes
[gene_id_to_result[id] for id in gene_ids2], # col indexes,
result)
# format as a Dataframe if required
return format_results_frame(gene_ids1, gene_ids2,
full_result, return_frame)
def _double_reaction_deletion_fba(cobra_model, reaction_indexes1,
reaction_indexes2, reaction_to_result,
solver, number_of_processes=None,
zero_cutoff=1e-15, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double reaction deletions using fba
cobra_model: model
reaction_indexes1, reaction_indexes2: reaction indexes (used as unique
identifiers)
reaction_to_result: maps each reaction identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
        this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
if solver is None:
solver = get_solver_name()
# generate the square result matrix
n_results = len(reaction_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
PoolClass = CobraDeletionMockPool if number_of_processes == 1 \
else CobraDeletionPool # explicitly disable multiprocessing
with PoolClass(cobra_model, n_processes=number_of_processes,
solver=solver, **kwargs) as pool:
# precompute all single deletions in the pool and store them along
# the diagonal
for reaction_index, result_index in iteritems(reaction_to_result):
pool.submit((reaction_index, ), label=result_index)
for result_index, value in pool.receive_all():
# if singly lethal, set everything in row and column to 0
value = value if abs(value) > zero_cutoff else 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
else: # only the diagonal needs to be set
results[result_index, result_index] = value
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(
reaction_indexes1, reaction_indexes2, reaction_to_result)
for result_index, (r1_index, r2_index) in index_selector:
# skip if the result was already computed to be lethal
if results[result_index] == 0:
continue
# reactions removed carry no flux
if r1_index in no_flux_reaction_indexes and \
r2_index in no_flux_reaction_indexes:
results[result_index] = wt_growth_rate
continue
pool.submit((r1_index, r2_index), label=result_index)
# get results
for result in pool.receive_all():
results[result[0]] = result[1]
return results
def _double_gene_deletion_fba(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, solver,
number_of_processes=None, zero_cutoff=1e-12,
wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double gene deletions using fba
cobra_model: model
gene_ids1, gene_ids2: lists of id's to be knocked out
gene_id_to_result: maps each gene identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
        this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
    # Because each gene reaction rule will be evaluated multiple times
    # when the reaction has multiple associated genes being deleted, compiling
    # the gene reaction rules ahead of time increases efficiency greatly.
compiled_rules = get_compiled_gene_reaction_rules(cobra_model)
n_results = len(gene_id_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
if number_of_processes == 1: # explicitly disable multiprocessing
PoolClass = CobraDeletionMockPool
else:
PoolClass = CobraDeletionPool
with PoolClass(cobra_model, n_processes=number_of_processes,
solver=solver, **kwargs) as pool:
# precompute all single deletions in the pool and store them along
# the diagonal
for gene_id, gene_result_index in iteritems(gene_id_to_result):
ko_reactions = find_gene_knockout_reactions(
cobra_model, (cobra_model.genes.get_by_id(gene_id),))
ko_indexes = [cobra_model.reactions.index(i) for i in ko_reactions]
pool.submit(ko_indexes, label=gene_result_index)
for result_index, value in pool.receive_all():
# if singly lethal, set everything in row and column to 0
value = value if abs(value) > zero_cutoff else 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
else: # only the diagonal needs to be set
results[result_index, result_index] = value
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(gene_ids1, gene_ids2,
gene_id_to_result)
for result_index, (gene1, gene2) in index_selector:
# if singly lethal the results have already been set
if results[result_index] == 0:
continue
ko_reactions = find_gene_knockout_reactions(
cobra_model, (gene1, gene2), compiled_rules)
ko_indexes = [cobra_model.reactions.index(i)
for i in ko_reactions]
# if all removed gene indexes carry no flux
if len(set(ko_indexes) - no_flux_reaction_indexes) == 0:
results[result_index] = wt_growth_rate
continue
pool.submit(ko_indexes, label=result_index)
for result in pool.receive_all():
value = result[1]
if value < zero_cutoff:
value = 0
results[result[0]] = value
return results
def _double_reaction_deletion_moma(cobra_model, reaction_indexes1,
reaction_indexes2, reaction_to_result,
solver, number_of_processes=1,
zero_cutoff=1e-15, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double reaction deletions using moma
cobra_model: model
reaction_indexes1, reaction_indexes2: reaction indexes (used as unique
identifiers)
reaction_to_result: maps each reaction identifier to the entry in
the result matrix
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
        this set, the result will be set to wt_growth_rate.
    number_of_processes: must be 1. Parallel MOMA not yet implemented
returns an upper triangular square matrix
"""
if number_of_processes > 1:
raise NotImplementedError("parallel MOMA not implemented")
if moma is None:
raise RuntimeError("scipy required for MOMA")
# generate the square result matrix
n_results = len(reaction_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
# function to compute reaction knockouts with moma
moma_model, moma_obj = moma.create_euclidian_moma_model(cobra_model)
def run(indexes):
# If all the reactions carry no flux, deletion will have no effect.
if no_flux_reaction_indexes.issuperset(indexes):
return wt_growth_rate
return moma.moma_knockout(moma_model, moma_obj, indexes,
solver=solver, **kwargs).f
# precompute all single deletions and store them along the diagonal
for reaction_index, result_index in iteritems(reaction_to_result):
value = run((reaction_index,))
value = value if abs(value) > zero_cutoff else 0.
results[result_index, result_index] = value
# if singly lethal, the entire row and column are set to 0
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(
reaction_indexes1, reaction_indexes2, reaction_to_result)
for result_index, (r1_index, r2_index) in index_selector:
# skip if the result was already computed to be lethal
if results[result_index] == 0:
continue
else:
results[result_index] = run((r1_index, r2_index))
return results
def _double_gene_deletion_moma(cobra_model, gene_ids1, gene_ids2,
gene_id_to_result, solver,
number_of_processes=1,
zero_cutoff=1e-12, wt_growth_rate=None,
no_flux_reaction_indexes=set(), **kwargs):
"""compute double gene deletions using moma
cobra_model: model
gene_ids1, gene_ids2: lists of id's to be knocked out
gene_id_to_result: maps each gene identifier to the entry in
the result matrix
number_of_processes: must be 1. Parallel MOMA not yet implemented
no_flux_reaction_indexes: set of indexes for reactions in the model
which carry no flux in an optimal solution. For deletions only in
        this set, the result will be set to wt_growth_rate.
returns an upper triangular square matrix
"""
if number_of_processes > 1:
raise NotImplementedError("parallel MOMA not implemented")
if moma is None:
raise RuntimeError("scipy required for MOMA")
    # Because each gene reaction rule will be evaluated multiple times
    # when the reaction has multiple associated genes being deleted, compiling
    # the gene reaction rules ahead of time increases efficiency greatly.
compiled_rules = get_compiled_gene_reaction_rules(cobra_model)
# function to compute reaction knockouts with moma
moma_model, moma_obj = moma.create_euclidian_moma_model(cobra_model)
def run(gene_ids):
ko_reactions = find_gene_knockout_reactions(cobra_model, gene_ids)
        ko_indexes = [cobra_model.reactions.index(i) for i in ko_reactions]
        # If all the reactions carry no flux, deletion will have no effect.
        if no_flux_reaction_indexes.issuperset(ko_indexes):
return wt_growth_rate
return moma.moma_knockout(moma_model, moma_obj, ko_indexes,
solver=solver, **kwargs).f
n_results = len(gene_id_to_result)
results = numpy.empty((n_results, n_results))
results.fill(numpy.nan)
# precompute all single deletions and store them along the diagonal
for gene_id, result_index in iteritems(gene_id_to_result):
value = run((gene_id,))
value = value if abs(value) > zero_cutoff else 0.
results[result_index, result_index] = value
# If singly lethal, the entire row and column are set to 0.
if value == 0.:
results[result_index, :] = 0.
results[:, result_index] = 0.
# Run double knockouts in the upper triangle
index_selector = yield_upper_tria_indexes(gene_ids1, gene_ids2,
gene_id_to_result)
for result_index, (gene1, gene2) in index_selector:
# if singly lethal the results have already been set
if results[result_index] == 0:
continue
results[result_index] = run((gene1, gene2))
return results
| lgpl-2.1 |
sinkap/trappy | trappy/pid_controller.py | 2 | 1971 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the power allocator's PID controller in the
current directory's trace.dat"""
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
class PIDController(Base):
"""Process the power allocator PID controller data in a FTrace dump"""
name = "pid_controller"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "thermal_zone_id"
"""The Pivot along which the data is orthogonal"""
unique_word="thermal_power_allocator_pid"
"""The event name in the trace"""
def plot_controller(self, title="", width=None, height=None, ax=None):
"""Plot a summary of the controller data
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
import trappy.plot_utils
title = trappy.plot_utils.normalize_title("PID", title)
if not ax:
ax = trappy.plot_utils.pre_plot_setup(width, height)
self.data_frame[["output", "p", "i", "d"]].plot(ax=ax)
trappy.plot_utils.post_plot_setup(ax, title=title)
register_ftrace_parser(PIDController, "thermal")
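
# A minimal usage sketch (assumptions: trappy is installed and a trace.dat was
# collected with the thermal_power_allocator_pid ftrace event enabled; the
# file path below is illustrative):
#
#     import trappy
#     ftrace = trappy.FTrace("trace.dat")
#     ftrace.pid_controller.plot_controller(title="Power allocator PID")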
| apache-2.0 |
JosmanPS/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``), the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/cluster/tests/test_spectral.py | 15 | 7958 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver # noqa
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.randint(0, n_class + 1, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
EarToEarOak/Wild-Find | wildfind/falconer/heatmap.py | 1 | 4181 | #!/usr/bin/env python
#
#
# Wild Find
#
#
# Copyright 2014 - 2017 Al Brown
#
# Wildlife tracking and mapping
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import copy
import tempfile
import warnings
from PySide import QtCore
import matplotlib
import numpy
matplotlib.use("Agg")
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from wildfind.falconer.utils import unique_locations
IMAGE_SIZE = 300
class HeatMap(QtCore.QObject):
def __init__(self, parent, settings, on_plotted, on_cleared):
QtCore.QObject.__init__(self, parent)
self._settings = settings
self._telemetry = None
self._thread = None
self._tempFile = tempfile.TemporaryFile(suffix='.png')
self._signal = SignalPlot()
self._signal.plotted.connect(on_plotted)
self._signal.cleared.connect(on_cleared)
def __on_plotted(self, bounds):
self._signal.plotted.emit(bounds)
def __on_cleared(self):
self._signal.cleared.emit()
def get_file(self):
return self._tempFile
def set(self, telemetry=None):
if telemetry is not None:
self._telemetry = copy.copy(telemetry)
if self._thread is not None and self._thread.isRunning():
QtCore.QTimer.singleShot(500, self.set)
return
else:
if len(telemetry) > 2:
self._thread = ThreadPlot(self,
self._settings,
self._telemetry,
self._tempFile,
self.__on_plotted)
self._thread.start()
return
self._signal.cleared.emit()
class ThreadPlot(QtCore.QThread):
def __init__(self, parent, settings, telemetry, tempFile, callback):
QtCore.QThread.__init__(self, parent)
self._settings = settings
self._telemetry = telemetry
self._tempFile = tempFile
self._cancel = False
self._signal = SignalPlot()
self._signal.plotted.connect(callback)
def __save(self, figure):
try:
self._tempFile.seek(0)
plt.savefig(self._tempFile)
except IOError:
pass
plt.close(figure)
def run(self):
figure = plt.figure(frameon=False)
axes = figure.add_axes([0, 0, 1, 1])
axes.set_axis_off()
figure.patch.set_alpha(0)
axes.axesPatch.set_alpha(0)
x, y, z = unique_locations(self._telemetry)
east = max(x)
west = min(x)
north = max(y)
south = min(y)
width = east - west
height = north - south
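        # Only interpolate when there are enough distinct locations and
        # sufficient spatial spread to build a meaningful grid.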
if len(x) > 2 and len(y) > 2 and width >= 10 and height >= 10:
figure.set_size_inches((6, 6. * height / width))
xi = numpy.linspace(west, east, IMAGE_SIZE)
yi = numpy.linspace(south, north, IMAGE_SIZE)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
zi = mlab.griddata(x, y, z, xi=xi, yi=yi,
interp=self._settings.interpolation)
axes.pcolormesh(xi, yi, zi,
cmap=self._settings.heatmapColour)
plt.axis([west, east, south, north])
self.__save(figure)
self._signal.plotted.emit((north, south, east, west))
class SignalPlot(QtCore.QObject):
plotted = QtCore.Signal(object)
cleared = QtCore.Signal()
if __name__ == '__main__':
print 'Please run falconer.py'
exit(1)
| gpl-2.0 |
duncanmmacleod/gwpy | gwpy/plot/tests/test_axes.py | 1 | 9420 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :mod:`gwpy.plot`
"""
import pytest
import numpy
from matplotlib import rcParams
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
from ...time import to_gps
from ...types import (Series, Array2D)
from ...testing import utils
from .. import Axes
from .utils import AxesTestBase
numpy.random.seed(0)
class TestAxes(AxesTestBase):
AXES_CLASS = Axes
def test_plot(self, ax):
series = Series(range(10), dx=.1)
lines = ax.plot(
series,
series * 2, 'k--',
series.xindex, series, 'b-',
[1, 2, 3], [4, 5, 6],
)
# check line 1 maps the series with default params
line = lines[0]
linex, liney = line.get_data()
utils.assert_array_equal(linex, series.xindex.value)
utils.assert_array_equal(liney, series.value)
# check line 2 maps 2*series with specific params
line = lines[1]
linex, liney = line.get_data()
utils.assert_array_equal(linex, series.xindex.value)
utils.assert_array_equal(liney, series.value * 2)
assert line.get_color() == 'k'
assert line.get_linestyle() == '--'
# check line 3
line = lines[2]
linex, liney = line.get_data()
utils.assert_array_equal(linex, series.xindex.value)
utils.assert_array_equal(liney, series.value)
assert line.get_color() == 'b'
assert line.get_linestyle() == '-'
# check line 4
line = lines[3]
linex, liney = line.get_data()
utils.assert_array_equal(linex, [1, 2, 3])
utils.assert_array_equal(liney, [4, 5, 6])
@pytest.mark.parametrize('c_sort', (False, True))
def test_scatter(self, ax, c_sort):
x = numpy.arange(10)
y = numpy.arange(10)
z = numpy.random.random(10)
coll = ax.scatter(x, y, c=z, c_sort=c_sort)
if c_sort:
utils.assert_array_equal(coll.get_array(), z[numpy.argsort(z)])
else:
utils.assert_array_equal(coll.get_array(), z)
# check that c=None works
ax.scatter(x, y, c=None)
# check that using non-array data works
ax.scatter([1], [1], c=[1])
def test_imshow(self, ax):
# standard imshow call
array = numpy.random.random((10, 10))
image2 = ax.imshow(array)
utils.assert_array_equal(image2.get_array(), array)
assert image2.get_extent() == (-.5, array.shape[0]-.5,
array.shape[1]-.5, -.5)
def test_imshow_array2d(self, ax):
# overloaded imshow call (Array2D)
array = Array2D(numpy.random.random((10, 10)), dx=.1, dy=.2)
image = ax.imshow(array)
utils.assert_array_equal(image.get_array(), array.value.T)
assert image.get_extent() == tuple(array.xspan) + tuple(array.yspan)
# check log scale uses non-zero boundaries
ax.clear()
ax.set_xlim(.1, 1)
ax.set_ylim(.1, 1)
ax.set_xscale('log')
ax.set_yscale('log')
image = ax.imshow(array)
assert image.get_extent() == (1e-300, array.xspan[1],
1e-300, array.yspan[1])
def test_pcolormesh(self, ax):
array = Array2D(numpy.random.random((10, 10)), dx=.1, dy=.2)
ax.grid(True, which="both", axis="both")
mesh = ax.pcolormesh(array)
utils.assert_array_equal(mesh.get_array(), array.T.flatten())
utils.assert_array_equal(mesh.get_paths()[-1].vertices[2],
(array.xspan[1], array.yspan[1]))
# check that restore_grid decorator did its job
try:
assert all((
ax.xaxis._major_tick_kw["gridOn"],
ax.xaxis._minor_tick_kw["gridOn"],
ax.yaxis._major_tick_kw["gridOn"],
ax.yaxis._minor_tick_kw["gridOn"],
))
except KeyError: # matplotlib < 3.3.3
assert all((
ax.xaxis._gridOnMajor,
ax.xaxis._gridOnMinor,
ax.yaxis._gridOnMajor,
ax.yaxis._gridOnMinor,
))
def test_hist(self, ax):
x = numpy.random.random(100) + 1
min_ = numpy.min(x)
max_ = numpy.max(x)
n, bins, patches = ax.hist(x, logbins=True, bins=10, weights=1.)
utils.assert_allclose(
bins, numpy.logspace(numpy.log10(min_), numpy.log10(max_),
11, endpoint=True),
)
def test_hist_error(self, ax):
"""Test that `ax.hist` presents the right error message for empty data
"""
with pytest.raises(ValueError) as exc:
ax.hist([], logbins=True)
assert str(exc.value).startswith('cannot generate log-spaced '
'histogram bins')
# assert it works if we give the range manually
ax.hist([], logbins=True, range=(1, 100))
def test_tile(self, ax):
x = numpy.arange(10)
y = numpy.arange(x.size)
w = numpy.ones_like(x) * .8
h = numpy.ones_like(x) * .8
# check default tiling (without colour)
coll = ax.tile(x, y, w, h, anchor='ll')
assert isinstance(coll, PolyCollection)
for i, path in enumerate(coll.get_paths()):
utils.assert_array_equal(
path.vertices,
numpy.asarray([
(x[i], y[i]),
(x[i], y[i] + h[i]),
(x[i] + w[i], y[i] + h[i]),
(x[i] + w[i], y[i]),
(x[i], y[i]),
]),
)
# check colour works with sorting (by default)
c = numpy.arange(x.size)
coll2 = ax.tile(x, y, w, h, color=c)
utils.assert_array_equal(coll2.get_array(), numpy.sort(c))
# check anchor parsing
for anchor in ('lr', 'ul', 'ur', 'center'):
ax.tile(x, y, w, h, anchor=anchor)
with pytest.raises(ValueError):
ax.tile(x, y, w, h, anchor='blah')
@pytest.mark.parametrize('cb_kw', [
{'use_axesgrid': True, 'fraction': 0.},
{'use_axesgrid': True, 'fraction': 0.15},
{'use_axesgrid': False},
])
def test_colorbar(self, ax, cb_kw):
array = Array2D(numpy.random.random((10, 10)), dx=.1, dy=.2)
mesh = ax.pcolormesh(array)
if not cb_kw['use_axesgrid'] and 'fraction' not in cb_kw:
with pytest.warns(PendingDeprecationWarning):
cbar = ax.colorbar(vmin=2, vmax=4, **cb_kw)
else:
cbar = ax.colorbar(vmin=2, vmax=4, **cb_kw)
assert cbar.mappable is mesh
assert cbar.mappable.get_clim() == (2., 4.)
def test_legend(self, ax):
ax.plot(numpy.arange(5), label='test')
leg = ax.legend()
lframe = leg.get_frame()
assert lframe.get_linewidth() == rcParams['patch.linewidth']
for line in leg.get_lines():
assert line.get_linewidth() == 6.
def test_legend_no_handler_map(self, ax):
ax.plot(numpy.arange(5), label='test')
leg = ax.legend(handler_map=None)
for line in leg.get_lines():
assert line.get_linewidth() == rcParams["lines.linewidth"]
def test_legend_deprecated_linewidth(self, ax):
ax.plot(numpy.arange(5), label='test')
with pytest.deprecated_call():
leg = ax.legend(linewidth=4)
assert leg.get_lines()[0].get_linewidth() == 4.
def test_legend_deprecated_alpha(self, ax):
ax.plot(numpy.arange(5), label='test')
with pytest.deprecated_call():
leg = ax.legend(alpha=.1)
assert leg.get_frame().get_alpha() == .1
def test_plot_mmm(self, ax):
mean_ = Series(numpy.random.random(10))
min_ = mean_ * .5
max_ = mean_ * 1.5
a, b, c, d = ax.plot_mmm(mean_, min_, max_)
for line in (a, b, c):
assert isinstance(line, Line2D)
assert isinstance(d, PolyCollection)
assert len(ax.lines) == 3
assert len(ax.collections) == 1
def test_fmt_data(self, ax):
value = 1234567890.123
result = str(to_gps(value))
assert ax.format_xdata(value) == (
ax.xaxis.get_major_formatter().format_data_short(value)
)
ax.set_xscale('auto-gps')
ax.set_yscale('auto-gps')
assert ax.format_xdata(value) == result
assert ax.format_ydata(value) == result
def test_epoch(self, ax):
ax.set_xscale('auto-gps')
assert not ax.get_epoch()
ax.set_epoch(12345)
assert ax.get_epoch() == 12345.0
| gpl-3.0 |
kylerbrown/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
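    # One possible completion of the tasks above (a sketch; the min_df/max_df,
    # C and n-gram values are illustrative choices, not the only valid ones):
    pipeline = Pipeline([
        ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
        ('clf', LinearSVC(C=1000)),
    ])
    parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
    grid_search.fit(docs_train, y_train)
    # cross-validated scores for each parameter combination explored
    for params, mean_score, scores in grid_search.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r"
              % (mean_score, scores.std() * 2, params))
    # predictions on the held-out test set
    y_predicted = grid_search.predict(docs_test)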
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
JustinNoel1/CollabLearning | source/datasets.py | 1 | 13236 | #import necessary modules
import os
import sys
import tarfile
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
# import struct
# from array import array
#Our old friends
import tensorflow as tf
import numpy as np
#Progress bar
from tqdm import tqdm
#Our plotting and image packages
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#from ggplot import *
# from IPython.display import display, Image
from scipy import ndimage
#Custom batch manager
from batch_manager import *
# ## Getting the data
def get_cifar(include_test = True, valid_size = 5000):
""" Returns tuple containing the concatenated train and test_images, train and test_labels, the image size,
and the test length for the CIFAR10 dataset.
"""
#pull data
(train_images, train_labels), (test_images, test_labels) = tf.contrib.keras.datasets.cifar10.load_data()
#concatenate and shuffle data
if include_test:
images = np.concatenate([train_images, test_images])
labels = np.concatenate([train_labels, test_labels])
test_length = len(test_labels)
else:
images = train_images
labels = train_labels
test_length = valid_size
    total_images, total_labels = randomize(images, labels)
    return total_images, total_labels, [32, 32, 3], test_length
def get_mnist():
""" Returns tuple containing the concatenated train and test_images, train and test_labels, the image size,
and the test length for the MNIST dataset.
"""
#pull data
(train_images, train_labels), (test_images, test_labels) = tf.contrib.keras.datasets.mnist.load_data('datasets')
#concatenate and shuffle
total_images, total_labels = randomize(np.concatenate([train_images, test_images]),
np.concatenate([train_labels, test_labels]))
image_shape = [28,28,1]
return total_images, total_labels, image_shape, len(test_labels)
def get_notmnist():
""" Returns tuple containing the concatenated train and test_images, train and test_labels, the image size,
and the test length for the notMNIST dataset. This code has been taken from the tensorflow notMNIST example.
"""
pickle_file = 'notMNIST_dataset/data.pkl'
import pickle
try:
with open(pickle_file, 'rb') as f:
return pickle.load(f)
except Exception as e:
print('No pickle file yet.', pickle_file, ':', e)
# from scipy.misc import imresize
#Urls for datasets
notMNIST_url = 'http://yaroslavvb.com/upload/notMNIST/'
#Target directories for downloads
notMNIST_dir = 'notMNIST_dataset'
#Get notMNIST tar.gz files
#Extract notMNIST tar files and get folders
NM_test_filename = download_data('notMNIST_small.tar.gz', notMNIST_url, 8458043, notMNIST_dir)
    test_folders = maybe_extract(NM_test_filename, data_root=notMNIST_dir)
NM_train_filename = download_data('notMNIST_large.tar.gz', notMNIST_url, 247336696, notMNIST_dir)
    train_folders = maybe_extract(NM_train_filename, data_root=notMNIST_dir)
train_datasets = maybe_pickle(train_folders, 50000)
test_datasets = maybe_pickle(test_folders, 1000)
train_size = 500000
test_size = 10000
_, _, train_dataset, train_labels = merge_datasets(train_datasets, train_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
train_images, train_labels = randomize(train_dataset, train_labels)
test_images, test_labels = randomize(test_dataset, test_labels)
total_images = np.concatenate([train_images,test_images])
total_labels = np.concatenate([train_labels,test_labels])
#Vectorize function for iterating over numpy array
# def trans(x):
# return imresize(x, (32,32))
#Resize array
# new_images = np.array([trans(x) for x in total_images])
image_shape = [28,28,1]
with open(pickle_file, 'wb') as output:
pickle.dump([total_images, total_labels, image_shape, test_size], output, pickle.HIGHEST_PROTOCOL)
return total_images, total_labels, image_shape, test_size
def progress_hook(bar):
"""Prints progress bar as we download. Adapted from tqdm example."""
    #No static variables in Python. Sigh.
last_block = [0]
def inner(count=1, block_size=1, tsize=None):
"""
count : int, optional
Number of blocks just transferred [default: 1].
block_size : int, optional
Size of each block (in tqdm units) [default: 1].
tsize : int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
bar.total = tsize
bar.update((count - last_block[0]) * block_size)
last_block[0] = count
return inner
def download_data(filename, url, expected_bytes, directory = 'datasets'):
"""Download filename from url into directory. Adapted from tensorflow example."""
#Make directory if necessary
if not os.path.exists(directory):
os.makedirs(directory)
dest_filename = os.path.join(directory, filename)
if not os.path.exists(dest_filename):
print('Attempting to download:', filename)
with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as pbar:
filename, _ = urlretrieve(url + filename, dest_filename,
reporthook=progress_hook(pbar))
print('\nDownload Complete!')
#Check the file
statinfo = os.stat(dest_filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', dest_filename)
else:
raise Exception(
'Failed to verify ' + dest_filename + '. Can you get to it with a browser?')
return dest_filename
def maybe_extract(filename, force=False, data_root = 'notMNIST_dataset'):
"""Extracts the notMNIST data if necessary and returns a list of folders containing the data for each letter"""
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
    if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall(data_root)
tar.close()
data_folders = [os.path.join(root, d) for d in sorted(os.listdir(root)) if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != 10:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
10, len(data_folders)))
print(data_folders)
return data_folders
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
pixel_depth = 255.0 # Number of levels per pixel.
image_size = 28 # Pixel width and height.
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = ndimage.imread(image_file).astype(float)
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
"""Pickles the data for each letter if it has not already been pickled"""
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
pass
# You may override by setting force=True.
#print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
def make_arrays(nb_rows, img_size):
"""Initializes numpy arrays"""
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
"""Merges the data from each of the letter classes
Returns:
tuple containing the training set, the training labels, the validation set, and the validation labels.
"""
image_size = 28
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
def randomize(dataset, labels):
"""Shuffles the dataset and labels
Returns:
pair of shuffled dataset and shuffled labels
"""
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
def show_data(images, labels, label_names, title = "Sample Images", num_images = 5, shape = (28,28),
aug_params = None, output_dir = 'latex/images'):
"""Displays plots of images with their labels.
Args:
images (nparray) : array of images
labels (nparray) : array of labels.
label_names (dict) : a dictionary between the labels and the corresponding names to be printed
title (str): title for figure
num_images (int): number of images to show
shape (int, int) or (int, int, int): shape of image to show
        aug_params (dict) : if not None, keyword arguments for
            tf.contrib.keras.preprocessing.image.ImageDataGenerator; a second
            row of randomly distorted versions of the same images is shown.
"""
#If we are distorting images, add another row to the figure and enlarge it
if aug_params != None:
idg = tf.contrib.keras.preprocessing.image.ImageDataGenerator(**aug_params)
num_rows = 2
fig = plt.figure(figsize = (2*num_images, 4))
else:
num_rows = 1
fig = plt.figure(figsize = (2*num_images, 2.5))
#for each image in our row
for i in range(num_images):
ix = np.random.randint(0, len(images))
re = images[ix]
ax = fig.add_subplot(num_rows, num_images, i+1)
ax.set_xlabel("Image = " + str(ix) + "\nLabel = " + label_names[int(labels[ix])])
ax.imshow(re)
ax.set_xticks([])
ax.set_yticks([])
#if we are distorting images, print another row of the same images, but distorted
if aug_params != None:
ax = fig.add_subplot(num_rows, num_images, num_images+i+1)
re = idg.random_transform(images[ix])
ax.set_xlabel("Distorted Image = " + str(ix))
ax.imshow(re)
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout(w_pad=4.0, h_pad=2.0)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fn = os.path.join(output_dir, title + '.png')
plt.savefig(fn)
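
# A minimal usage sketch (illustrative; assumes the standard CIFAR-10 class
# ordering for the label names and that matplotlib can write to the default
# output directory):
#
#     images, labels, shape, test_len = get_cifar(include_test=False)
#     names = dict(enumerate(['airplane', 'automobile', 'bird', 'cat', 'deer',
#                             'dog', 'frog', 'horse', 'ship', 'truck']))
#     show_data(images, labels, names, title="CIFAR10 Sample")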
| apache-2.0 |
cl4rke/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 338 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initializations strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrate one single run of the ``MiniBatchKMeans``
estimator using a ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of run (with randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 28 | 18031 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
thomaslima/PySpice | examples/transmission-lines/time-delay.py | 1 | 1528 | ####################################################################################################
import matplotlib.pyplot as plt
####################################################################################################
import PySpice.Logging.Logging as Logging
logger = Logging.setup_logging()
####################################################################################################
from PySpice.Probe.Plot import plot
from PySpice.Spice.Netlist import Circuit
####################################################################################################
circuit = Circuit('Transmission Line')
circuit.Pulse('pulse', 'input', circuit.gnd, 0, 1, 1e-9, 1e-6)
circuit.R('load', 'output', circuit.gnd, 50)
circuit.TransmissionLine('delay', 'input', circuit.gnd, 'output',
circuit.gnd, impedance=50, time_delay=40e-9)
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.transient(step_time=1e-11, end_time=100e-9)
####################################################################################################
plt.figure(None, (20, 6))
plot(analysis['input'])
plot(analysis['output'])
plt.xlabel('Time [s]')
plt.ylabel('Voltage [V]')
plt.grid()
plt.legend(['input', 'output'], loc='upper right')
plt.show()
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 |
CallaJun/hackprince | indico/skimage/viewer/utils/core.py | 5 | 6569 | import warnings
import numpy as np
from skimage.viewer.qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warnings.warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
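# Illustrative usage of the two helpers above (a sketch, not part of the
# original module; ``ImageViewer`` stands in for any Qt widget built on this
# canvas machinery):
#
#     app = init_qtapp()            # ensure a QApplication exists first
#     viewer = ImageViewer(image)   # create QWidgets only after init_qtapp()
#     start_qtapp(app)              # blocks until the Qt event loop exits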
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
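# Minimal usage sketch for LinearColormap above (illustrative values, not
# from the original source): a map fading linearly from black to pure red is
#
#     cmap = LinearColormap('black_red',
#                           {'red':   [(0.0, 0.0), (1.0, 1.0)],
#                            'green': [(0.0, 0.0), (1.0, 0.0)],
#                            'blue':  [(0.0, 0.0), (1.0, 0.0)]})
#
# and can be passed anywhere matplotlib accepts a colormap, e.g.
# ``ax.imshow(image, cmap=cmap)``.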
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
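# Usage sketch for ``figimage`` above (illustrative, not from the original
# source): for a 512x512 array ``img``, ``fig, ax = figimage(img, scale=0.5)``
# produces a 256x256-pixel figure tightly wrapping the image; overlays can
# then be drawn through ``ax``, e.g. ``ax.plot([0, 255], [0, 255], 'r-')``.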
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
| lgpl-3.0 |
shahankhatch/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
TaxIPP-Life/til-france | til_france/tests/rescale_migration.py | 1 | 2539 | # -*- coding:utf-8 -*-
from __future__ import division
import os
import numpy
import pandas
from til_france.tests.base import til_france_path
def rescale_migration():
# Data from INSEE projections
data_path = os.path.join(til_france_path, 'param/demo')
sheetname_by_gender = dict(zip(
['total', 'male', 'female'],
['populationTot', 'populationH', 'populationF']
))
population_insee_by_gender = dict(
(
gender,
pandas.read_excel(
os.path.join(data_path, 'projpop0760_FECcentESPcentMIGcent.xls'),
sheetname = sheetname,
skiprows = 2,
header = 2
)[:109].set_index(u'Âge au 1er janvier')
)
for gender, sheetname in sheetname_by_gender.iteritems()
)
migration_insee_by_gender = dict(
(
gender,
pandas.read_csv(os.path.join(data_path, 'hyp_soldemig{}.csv'.format(suffix)))
            # ages 0 to 109, and not 11 as in the xls file
)
for gender, suffix in dict(female = 'F', male = 'H').iteritems()
)
with open(os.path.join(data_path, 'hyp_soldemigH.csv'), 'r') as header_file:
header = header_file.read().splitlines(True)[:2]
for gender, migration in migration_insee_by_gender.iteritems():
migration.iloc[0, 0] = 0
migration_extract_total = migration.iloc[1:, 1:].copy().sum()
migration_extract = numpy.maximum(migration.iloc[1:, 1:].copy(), 0)
        # Rescaling to deal with emigration
migration_extract = migration_extract * migration_extract_total / migration_extract.sum()
total_population = population_insee_by_gender[gender]
total_population.index = migration_extract.index[:-1]
migration_extract.iloc[:-1, :] = (
migration_extract.iloc[:-1, :].copy().astype(float).values / total_population.astype(float).values
)
migration.iloc[1:, 1:] = migration_extract.values
migration.age = migration.age.astype(int)
suffix = 'F' if gender == 'female' else 'H'
file_path = os.path.join(data_path, 'hyp_soldemig{}_custom.csv'.format(suffix))
migration.to_csv(file_path, index = False, header = False)
with open(file_path, 'r') as input_file:
data = input_file.read().splitlines(True)
with open(file_path, 'w') as output_file:
output_file.writelines(header)
output_file.writelines(data[1:])
| gpl-3.0 |
jhprinz/openpathsampling | openpathsampling/high_level/move_scheme.py | 1 | 35322 | import openpathsampling as paths
from openpathsampling.tools import refresh_output
from . import move_strategy
from .move_strategy import levels as strategy_levels
from openpathsampling.netcdfplus import StorableNamedObject
try:
import pandas as pd
has_pandas = True
except ImportError:
has_pandas = False
pd = None
import sys
class MoveScheme(StorableNamedObject):
"""
Creates a move decision tree based on `MoveStrategy` instances.
Attributes
----------
movers : dict
Dictionary mapping mover group as key to list of movers
strategies : dict
Dictionary mapping level (number) to list of strategies
root_mover : PathMover
Root of the move decision tree (`None` until tree is built)
"""
def __init__(self, network):
super(MoveScheme, self).__init__()
self.movers = {}
self.network = network
self.strategies = {}
self.balance_partners = {}
self.choice_probability = {}
self._real_choice_probability = {} # used as override, e.g., in SRTIS
self.root_mover = None
self._mover_acceptance = {} # used in analysis
def to_dict(self):
ret_dict = {
'movers': self.movers,
'network': self.network,
'choice_probability': self.choice_probability,
'real_choice_probability': self.real_choice_probability,
'balance_partners': self.balance_partners,
'root_mover': self.root_mover,
}
return ret_dict
@classmethod
def from_dict(cls, dct):
scheme = cls.__new__(cls)
# noinspection PyArgumentList
scheme.__init__(dct['network'])
scheme.movers = dct['movers']
scheme.choice_probability = dct['choice_probability']
scheme._real_choice_probability = dct['real_choice_probability']
scheme.balance_partners = dct['balance_partners']
scheme.root_mover = dct['root_mover']
return scheme
@property
def real_choice_probability(self):
if self._real_choice_probability == {}:
return self.choice_probability
else:
return self._real_choice_probability
@real_choice_probability.setter
def real_choice_probability(self, value):
self._real_choice_probability = value
def append(self, strategies, levels=None, force=False):
"""
Adds new strategies to this scheme, organized by `level`.
Parameters
----------
strategies : MoveStrategy or list of MoveStrategy
strategies to add to this scheme
levels : integer or list of integer or None
            levels to associate with each strategy. If None, each strategy's
            own `level` attribute is used.
force : bool
force the strategy to be appended, even if a root_mover exists.
Default False for safety.
"""
# first we clean up the input: strategies is a list of MoveStrategy;
# levels is a list of integers
if self.root_mover is not None:
if force:
self.root_mover = None
else:
raise RuntimeError("Can't add strategies after the move " +
"decision tree has been built. " +
"Override with `force=True`.")
try:
strategies = list(strategies)
except TypeError:
strategies = [strategies]
if levels is not None:
try:
levels = list(levels)
except TypeError:
levels = [levels]*len(strategies)
else:
levels = []
for strat in strategies:
levels.append(strat.level)
# now we put everything into appropriate dictionaries
for strat, lev in zip(strategies, levels):
try:
self.strategies[lev].append(strat)
except KeyError:
self.strategies[lev] = [strat]
# TODO: it might be nice to have a way to "lock" this once it has been
# saved. That would prevent a (stupid) user from trying to rebuild a
# custom-modified tree.
def build_move_decision_tree(self):
for lev in sorted(self.strategies.keys()):
for strat in self.strategies[lev]:
self.apply_strategy(strat)
# TODO: should I make this a property? make root_mover into
# _move_decision_tree? allow the user to directly set it? rename as
# move_scheme? separated from building until some of that is clarified
def move_decision_tree(self, rebuild=False):
"""
Returns the move decision tree.
Parameters
----------
rebuild : bool, optional
Whether to rebuild the tree, or use the previously build version
(default is False, if no tree exists, sets to True)
Returns
-------
PathMover
Root mover of the move decision tree
"""
if self.root_mover is None:
rebuild = True
if rebuild:
self.choice_probability = {}
self.build_move_decision_tree()
return self.root_mover
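    # Hedged usage sketch of the strategy/tree workflow above (``network`` and
    # ``engine`` are assumed, pre-built objects):
    #
    #     scheme = MoveScheme(network)
    #     scheme.append(move_strategy.OneWayShootingStrategy(engine=engine))
    #     scheme.append(move_strategy.OrganizeByMoveGroupStrategy())
    #     root = scheme.move_decision_tree()   # builds the tree on first use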
def apply_strategy(self, strategy):
"""
Applies given strategy to the scheme as it stands.
This is the tool used in the process of building up the move
decision tree.
Parameters
----------
strategy : MoveStrategy
the strategy to apply
"""
movers = strategy.make_movers(self)
group = strategy.group
if strategy_levels.level_type(strategy.level) == strategy_levels.GLOBAL:
# shortcut out for the global-level stuff
self.root_mover = movers
elif strategy.replace_signatures:
self.movers[group] = movers
elif strategy.replace_movers:
try:
n_existing = len(self.movers[group])
except KeyError:
# if the group doesn't exist, set it to these movers
self.movers[group] = movers
else:
# Note that the following means that if the list of new
# movers includes two movers with the same sig, the second
# will overwrite the first. This is desired behavior. On the
# other hand, if the list of old movers in the group already
# has two movers with the same signature, then both should
# be overwritten.
existing_sigs = {}
for i in range(n_existing):
key = self.movers[group][i].ensemble_signature
try:
existing_sigs[key].append(i)
except KeyError:
existing_sigs[key] = [i]
# For each mover, if its signature exists in the existing
# movers, replace the existing. Otherwise, append it to the
# list.
for mover in movers:
m_sig = mover.ensemble_signature
if m_sig in existing_sigs.keys():
for idx in existing_sigs[m_sig]:
self.movers[group][idx] = mover
else:
self.movers[group].append(mover)
elif strategy.replace_group:
if strategy.from_group is not None:
self.movers.pop(strategy.from_group)
self.movers[group] = movers
else:
try:
self.movers[group].extend(movers)
except KeyError:
self.movers[group] = movers
def ensembles_for_move_tree(self, root=None):
"""
Finds the list of all ensembles in the move tree starting at `root`.
Parameters
----------
root : PathMover
Mover to act as root of this tree (can be a subtree). Default is
`None`, in which case `self.root_mover` is used.
Returns
-------
list of Ensemble
ensembles which appear in this (sub)tree
"""
if root is None:
if self.root_mover is None:
self.root_mover = self.move_decision_tree()
root = self.root_mover
movers = root.map_pre_order(lambda x: x)
mover_ensemble_dict = {}
for m in movers:
input_sig = m.input_ensembles
output_sig = m.output_ensembles
for ens in input_sig + output_sig:
mover_ensemble_dict[ens] = 1
mover_ensembles = list(mover_ensemble_dict.keys())
return mover_ensembles
def find_hidden_ensembles(self, root=None):
"""
All ensembles which exist in the move scheme but not in the network.
Hidden ensembles are typically helper ensembles for moves; for
example, the minus move uses a "segment" helper ensemble which is
almost, but not quite, the innermost interface ensemble.
Parameters
----------
root : PathMover
Mover to act as root of this tree (can be a subtree). Default is
`None`, in which case `self.root_mover` is used.
Returns
-------
set of Ensemble
"hidden" ensembles; the ensembles which are in the scheme but
not the network.
"""
unhidden_ensembles = set(self.network.all_ensembles)
mover_ensembles = set(self.ensembles_for_move_tree(root))
hidden_ensembles = mover_ensembles - unhidden_ensembles
return hidden_ensembles
def find_unused_ensembles(self, root=None):
"""
All ensembles which exist in the network but not in the move scheme.
Not all move schemes will use all the ensembles. For example, a move
scheme might choose not to use the network's automatically generated
minus ensemble or multistate ensemble.
Parameters
----------
root : PathMover
Mover to act as root of this tree (can be a subtree). Default is
`None`, in which case `self.root_mover` is used.
Returns
-------
set of Ensemble
"unused" ensembles; the ensembles which are in the network but
not the scheme.
"""
unhidden_ensembles = set(self.network.all_ensembles)
mover_ensembles = set(self.ensembles_for_move_tree(root))
unused_ensembles = unhidden_ensembles - mover_ensembles
return unused_ensembles
def find_used_ensembles(self, root=None):
"""
All ensembles which are both in the network and in the move scheme.
"""
unhidden_ensembles = set(self.network.all_ensembles)
mover_ensembles = set(self.ensembles_for_move_tree(root))
used_ensembles = unhidden_ensembles & mover_ensembles
return used_ensembles
def check_for_root(self, fcn_name):
"""
Raises runtime warning if self.root_mover not set.
Some functions are only valid after the decision tree has been
built. This complains if the tree is not there.
"""
if self.root_mover is None:
warnstr = ("Can't use {fcn_name} before building the move " +
"decision tree").format(fcn_name=fcn_name)
raise RuntimeWarning(warnstr)
def list_initial_ensembles(self, root=None):
"""
Returns a list of initial ensembles for this move scheme.
Used in `initial_conditions_from_trajectories` to get the ensembles
we need. The list returned by this is of a particular format: it
should be thought of as a list of lists of ensembles. Call this the
"list" and the "sublists". At least one member of each sublist is
required, and if a "sublist" is actually an ensemble, it is treated
as a sublist of one. So returning [a, b, [c, d], e] is equivalent to
returning [[a], [b], [c, d], [e]], and is interpreted as "initial
conditions are ensembles a, b, e, and one of either c or d".
To make the simplest cases more explicit, normal all-replica TIS for
ensembles a, b, and c would return [a, b, c], or equivalently, [[a],
[b], [c]]. Single-replica TIS would return [[a, b, c]].
"""
# basically, take the find_used_ensembles and return them in the
# canonical order from network.all_ensembles
used_ensembles = self.find_used_ensembles(root)
output_ensembles = [ens for ens in self.network.all_ensembles
if ens in used_ensembles]
return output_ensembles
def initial_conditions_from_trajectories(self, trajectories,
sample_set=None,
strategies=None,
preconditions=None,
reuse_strategy='avoid-symmetric',
engine=None):
"""
Create a SampleSet with as many initial samples as possible.
The goal of this is to give the initial SampleSet that would be
desired.
Parameters
----------
trajectories : list of :class:`.Trajectory` or :class:`.Trajectory`
the input trajectories to use
sample_set : :class:`.SampleSet`, optional
if given, add samples to this sampleset. Default is None, which
means that this will start a new sampleset.
strategies : dict
a dict that specifies the options used when ensemble functions
are used to create a new sample.
preconditions : list of str
a list of possible steps to modify the initial list of trajectories.
possible choices are
1. `sort-shortest` - sorting by shortest first,
            2. `sort_median` - sorting by the middle one first and then
               moving away from the median length
3. `sort-longest` - sorting by the longest first
4. `reverse` - reverse the order and
5. `mirror` which will add the reversed trajectories to the
list in the same order
Default is `None` which means to do nothing.
reuse_strategy : str
            if `avoid` then reusing the same trajectory twice is avoided.
`avoid-symmetric` will also remove reversed copies
if possible. `all` will not attempt to avoid already existing ones.
`once` will strictly not reuse a trajectory and `once-symmetric`
will also not use reversed copies.
        engine : :class:`openpathsampling.engines.DynamicsEngine`
the engine used for extending moves
Returns
-------
:class:`.SampleSet`
sampleset with samples for every initial ensemble for this
scheme that could be satisfied by the given trajectories
See Also
--------
list_initial_ensembles
check_initial_conditions
assert_initial_conditions
"""
if sample_set is None:
sample_set = paths.SampleSet([])
ensembles = self.list_initial_ensembles()
sample_set = sample_set.generate_from_trajectories(
ensembles,
trajectories,
preconditions,
strategies,
reuse_strategy,
engine
)
refresh_output(self.initial_conditions_report(sample_set),
ipynb_display_only=True, print_anyway=False)
return sample_set
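    # Hedged usage sketch (``trajectories`` is an assumed list of
    # :class:`.Trajectory` objects, e.g. from a previous run):
    #
    #     initial_conditions = scheme.initial_conditions_from_trajectories(
    #         trajectories)
    #     scheme.assert_initial_conditions(initial_conditions)
    #
    # assert_initial_conditions (below) raises if any required ensemble could
    # not be filled from the given trajectories.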
def check_initial_conditions(self, sample_set):
"""
Check for missing or extra ensembles for initial conditions.
This is primary used programmatically as a reusable function for
several use cases where we need this information. See functions
under "see also" for examples of such cases.
Parameters
----------
sample_set : :class:`.SampleSet`
proposed set of initial conditions for this movescheme
Returns
-------
missing : list of list of :class:`.Ensemble`
ensembles needed by the move scheme and missing in the sample
set, in the format used by `list_initial_ensembles`
extra : list of :class:`.Ensemble`
            ensembles in the sampleset that are not used by the move scheme
See Also
--------
list_initial_ensembles
assert_initial_conditions
initial_conditions_report
"""
return sample_set.check_ensembles(self.list_initial_ensembles())
def assert_initial_conditions(self, sample_set, allow_extras=False):
"""
Assertion that the given sampleset is good for initial conditions.
Parameters
----------
sample_set : :class:`.SampleSet`
proposed set of initial conditions for this movescheme
allow_extras : bool
whether extra ensembles are allowed, default False, meaning the
extra ensembles raise an assertion error
Raises
------
AssertionError
the proposed initial conditions are not valid for this scheme
See Also
--------
check_initial_conditions
initial_conditions_report
"""
(missing, extras) = self.check_initial_conditions(sample_set)
msg = ""
if len(missing) > 0:
msg += "Missing ensembles: " + str(missing) + "\n"
if len(extras) > 0 and not allow_extras:
msg += "Extra ensembles: " + str(extras) + "\n"
if msg != "":
raise AssertionError("Bad initial conditions.\n" + msg)
def initial_conditions_report(self, sample_set):
"""
String report on whether the given SampleSet gives good initial
conditions.
This is intended to provide a user-friendly tool for interactive
setup.
Parameters
----------
sample_set : :class:`.SampleSet`
proposed set of initial conditions for this movescheme
Returns
-------
str
a human-readable string describing if (and which) ensembles are
missing
"""
(missing, extra) = self.check_initial_conditions(sample_set)
msg = ""
if len(missing) == 0:
msg += "No missing ensembles.\n"
else:
msg += "Missing ensembles:\n"
for ens_list in missing:
msg += "* ["
msg += ", ".join([ens.name for ens in ens_list]) + "]\n"
if len(extra) == 0:
msg += "No extra ensembles.\n"
else:
msg += "Extra ensembles:\n"
for ens in extra:
msg += "* " + ens.name + "\n"
return msg
def build_balance_partners(self):
"""
Create list of balance partners for all movers in groups.
The balance partners are the movers in the same mover group which
have the opposite ensemble signature (input and output switched).
These are used when dynamically calculating detailed balance.
Note
----
Currently, every mover in a group must have exactly one balance
partner. In the future, this might be relaxed to "at least one".
"""
self.check_for_root("build_balance_partners")
for groupname in self.movers.keys():
group = self.movers[groupname]
for mover in group:
partner_sig_set = (set(mover.output_ensembles),
set(mover.input_ensembles))
partners = [m for m in group
if m.ensemble_signature_set == partner_sig_set]
self.balance_partners[mover] = partners
if len(partners) != 1:
warnstr = "Mover {0}: number of balance partners is {1}"
raise RuntimeWarning(warnstr.format(mover, len(partners)))
return self.balance_partners
def _select_movers(self, input_mover):
"""
Return list of movers from group name, list of movers, or mover.
Mainly used to regularize input for other functions.
Parameters
----------
input_mover : PathMover or list of PathMover or string
Returns
-------
list of PathMover
If `input_mover` is list of PathMovers, returns same. If
`input_mover` is PathMover, wraps it in a list. If `input_mover`
is a string, uses that key in self.movers.
"""
try:
movers = self.movers[input_mover]
except TypeError: # unhashable type: 'list'
movers = input_mover
except KeyError: # input_mover not found
movers = [input_mover]
# here we do a little type-checking
for m in movers:
try:
assert(isinstance(m, paths.PathMover))
except AssertionError:
msg = ("Bad output from _select_movers: " + str(movers)
+ "; " + repr(m) + " is not a PathMover\n")
msg += ("Are you using a group name before building the "
+ "move decision tree?")
raise TypeError(msg)
return movers
def n_steps_for_trials(self, mover, n_attempts):
"""
Return number of MC steps to expect `n_attempts` trials of `mover`.
Read this as "To get `n_attempts` trials of `mover`, you need around
        `scheme.n_steps_for_trials(mover, n_attempts)` MC steps." If `mover`
is a (string) key for a group, then return the total for that group.
If mover is a list of movers, return the total for that list.
Parameters
----------
mover : PathMover or list of PathMover or string
The mover of interest. See MoveScheme._select_movers for
interpretation.
n_attempts : The desired number of attempts of `mover`
Returns
-------
float
expected number of steps to get `n_attempts` of `mover`
"""
movers = self._select_movers(mover)
total_probability = sum([self.real_choice_probability[m]
for m in movers])
return n_attempts / total_probability
def n_trials_for_steps(self, mover, n_steps):
"""
Return number of attempts expected for `mover` after `n_steps`.
Read this as "If you run `n_steps` Monte Carlo steps, you can expect
        to have about `scheme.n_trials_for_steps(mover, n_steps)` trials of
        `mover`." If `mover` is a (string) key for a group, then return the
total for that group. If mover is a list of movers, return the
total for that list.
Parameters
----------
mover : PathMover or list of PathMover or string
The mover of interest. See MoveScheme._select_movers for
interpretation.
n_steps : The number of hypothetical MC steps
Returns
-------
float
expected number of trials of `mover` in `n_steps` MC steps
"""
movers = self._select_movers(mover)
total_probability = sum([self.real_choice_probability[m]
for m in movers])
return total_probability * n_steps
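    # Worked example for the two helpers above (numbers are illustrative): if
    # the movers in group 'shooting' together have a real choice probability
    # of 0.5, then n_steps_for_trials('shooting', 1000) returns 2000 (about
    # 2000 MC steps are needed for 1000 shooting trials), and
    # n_trials_for_steps('shooting', 2000) returns 1000.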
def sanity_check(self):
# check that all sampling ensembles are used
sampling_transitions = self.network.sampling_transitions
all_sampling_ensembles = sum(
[t.ensembles for t in sampling_transitions], []
)
unused = self.find_unused_ensembles()
for ens in unused:
try:
assert(ens not in all_sampling_ensembles)
except AssertionError as e:
failmsg = "Sampling ensemble {ens} unused in move scheme {s}\n"
e.args = [failmsg.format(ens=ens.name, s=self)]
raise
# check that choice_probability adds up
total_choice = sum(self.choice_probability.values())
try:
assert(abs(total_choice - 1.0) < 1e-7)
except AssertionError as e:
failmsg = "Choice probability not normalized for scheme {s}\n"
e.args = [failmsg.format(s=self)]
raise
# check for duplicated movers in groups
all_movers = sum(self.movers.values(), [])
all_unique_movers = set(all_movers)
try:
assert(len(all_movers) == len(all_unique_movers))
except AssertionError as e:
failmsg = "At least one group-level mover duplicated " \
"in scheme {s}\n"
e.args = [failmsg.format(s=self)]
raise
# note that the test for the same ens sig is part of the balance
# calc
return True # if we get here, then we must have passed tests
def _move_summary_line(self, move_name, n_accepted, n_trials,
n_total_trials, expected_frequency, indentation):
try:
acceptance = float(n_accepted) / n_trials
except ZeroDivisionError:
acceptance = float("nan")
line = ("* "*indentation + str(move_name) +
" ran " + "{:.3%}".format(float(n_trials)/n_total_trials) +
" (expected {:.2%})".format(expected_frequency) +
" of the cycles with acceptance " + str(n_accepted) + "/" +
str(n_trials) + " ({:.2%})\n".format(acceptance))
return line
def move_acceptance(self, steps):
for step in steps:
delta = step.change
for m in delta:
acc = 1 if m.accepted else 0
key = (m.mover, str(delta.key(m)))
is_trial = 1
# if hasattr(key[0], 'counts_as_trial'):
# is_trial = 1 if key[0].counts_as_trial else 0
try:
self._mover_acceptance[key][0] += acc
self._mover_acceptance[key][1] += is_trial
except KeyError:
self._mover_acceptance[key] = [acc, is_trial]
def move_summary(
self, steps, movers=None, output=sys.stdout, depth=0):
"""
Provides a summary of the movers in `steps`.
The summary includes the number of moves attempted and the
acceptance rate. In some cases, extra lines are printed for each of
the submoves.
Parameters
----------
steps : iterable of :class:`.MDStep`
steps to analyze
movers : None or string or list of PathMover
If None, provides a short summary of the keys in self.mover. If
a string, provides a short summary using that string as a key in
the `movers` dict. If a mover or list of movers, provides
summary for each of those movers.
output : file
file to direct output
depth : integer or None
depth of submovers to show: if integer, shows that many
submovers for each move; if None, shows all submovers
"""
my_movers = {}
expected_frequency = {}
if movers is None:
movers = list(self.movers.keys())
if type(movers) is str:
movers = self.movers[movers]
for key in movers:
try:
my_movers[key] = self.movers[key]
except KeyError:
my_movers[key] = [key]
# if scheme_copies is not None:
# for sc in scheme_copies:
# movers = sc.movers.keys()
# for key in movers:
# my_movers[key].extend(self.movers[key])
stats = {}
for groupname in my_movers.keys():
stats[groupname] = [0, 0]
if self._mover_acceptance == {}:
self.move_acceptance(steps)
no_move_keys = [k for k in self._mover_acceptance.keys()
if k[0] is None]
n_in_scheme_no_move_trials = sum([self._mover_acceptance[k][1]
for k in no_move_keys
if k[1] != '[None]'])
n_no_move_trials = sum([self._mover_acceptance[k][1]
for k in self._mover_acceptance.keys()
if k[0] is None])
tot_trials = len(steps) - n_no_move_trials
if n_in_scheme_no_move_trials > 0:
output.write(
"Null moves for " + str(n_in_scheme_no_move_trials)
+ " cycles. Excluding null moves:\n"
)
for groupname in my_movers.keys():
group = my_movers[groupname]
for mover in group:
key_iter = (k for k in self._mover_acceptance.keys()
if k[0] == mover)
for k in key_iter:
stats[groupname][0] += self._mover_acceptance[k][0]
stats[groupname][1] += self._mover_acceptance[k][1]
try:
# if null moves don't count
expected_frequency[groupname] = sum(
[self.choice_probability[m] for m in group]
)
## if null moves count
# expected_frequency[groupname] = sum(
# [self.real_choice_probability[m] for m in group]
# )
except KeyError:
expected_frequency[groupname] = float('nan')
for groupname in my_movers.keys():
if has_pandas and isinstance(output, pd.DataFrame):
# TODO Pandas DataFrame Output
pass
else:
line = self._move_summary_line(
move_name=groupname,
n_accepted=stats[groupname][0],
n_trials=stats[groupname][1],
n_total_trials=tot_trials,
expected_frequency=expected_frequency[groupname],
indentation=0
)
output.write(line)
# raises AttributeError if no write function
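    # Hedged usage sketch for move_summary (``storage`` is an assumed, already
    # open openpathsampling storage; 'shooting' assumes the default shooting
    # group name):
    #
    #     scheme.move_summary(storage.steps)                  # print to stdout
    #     scheme.move_summary(storage.steps, movers='shooting')
    #     with open('summary.txt', 'w') as f:
    #         scheme.move_summary(storage.steps, output=f)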
class DefaultScheme(MoveScheme):
"""
Just a MoveScheme with the full set of default strategies: nearest
neighbor repex, uniform selection one-way shooting, minus move, and
path reversals, all structured as choose move type then choose specific
move.
"""
def __init__(self, network, engine=None):
super(DefaultScheme, self).__init__(network)
n_ensembles = len(network.sampling_ensembles)
self.append(move_strategy.NearestNeighborRepExStrategy())
self.append(move_strategy.OneWayShootingStrategy(engine=engine))
self.append(move_strategy.PathReversalStrategy())
self.append(move_strategy.MinusMoveStrategy(engine=engine))
global_strategy = move_strategy.OrganizeByMoveGroupStrategy()
self.append(global_strategy)
try:
msouters = self.network.special_ensembles['ms_outer']
except KeyError:
# if no ms_outer, ignore the ms_outer setup for now; later we
# might default to a state swap
pass
else:
for ms in msouters.keys():
self.append(move_strategy.OneWayShootingStrategy(
ensembles=[ms],
group="ms_outer_shooting",
engine=engine
))
self.append(move_strategy.PathReversalStrategy(
ensembles=[ms],
replace=False
))
ms_neighbors = [t.ensembles[-1] for t in msouters[ms]]
pairs = [[ms, neighb] for neighb in ms_neighbors]
self.append(move_strategy.SelectedPairsRepExStrategy(
ensembles=pairs
))
class LockedMoveScheme(MoveScheme):
def __init__(self, root_mover, network=None, root_accepted=None):
super(LockedMoveScheme, self).__init__(network)
self.root_mover = root_mover
def append(self, strategies, levels=None, force=False):
raise TypeError("Locked schemes cannot append strategies")
def build_move_decision_tree(self):
# override with no-op
pass
def move_decision_tree(self, rebuild=False):
return self.root_mover
def apply_strategy(self, strategy):
raise TypeError("Locked schemes cannot apply strategies")
def to_dict(self):
# things that we always have (from MoveScheme)
ret_dict = {
'network': self.network,
'balance_partners': self.balance_partners,
'root_mover': self.root_mover,
'movers': self._movers,
'choice_probability': self._choice_probability,
'real_choice_probability': self._real_choice_probability}
# things that LockedMoveScheme overrides
return ret_dict
@property
def choice_probability(self):
if self._choice_probability == {}:
raise AttributeError("'choice_probability' must be manually " +
"set in 'LockedMoveScheme'")
else:
return self._choice_probability
@choice_probability.setter
def choice_probability(self, vals):
self._choice_probability = vals
@property
def movers(self):
if self._movers == {}:
raise AttributeError("'movers' must be manually " +
"set in 'LockedMoveScheme'")
else:
return self._movers
@movers.setter
def movers(self, vals):
self._movers = vals
class SRTISScheme(DefaultScheme):
"""
    This gives exactly the DefaultScheme, but as an SRTIS setup.
"""
def __init__(self, network, bias=None, engine=None):
super(SRTISScheme, self).__init__(network, engine)
sr_minus_strat = move_strategy.SingleReplicaMinusMoveStrategy(
engine=engine
)
sr_minus_strat.level = move_strategy.levels.SUPERGROUP # GROUP?
# maybe this should be the default for that strategy anyway? using it
# at mover-level seems less likely than group-level
self.append([move_strategy.PoorSingleReplicaStrategy(),
move_strategy.EnsembleHopStrategy(bias=bias),
sr_minus_strat])
class OneWayShootingMoveScheme(MoveScheme):
"""
MoveScheme with only a OneWayShooting strategy.
Useful for building on top of. Useful as default for TPS.
"""
def __init__(self, network, selector=None, ensembles=None, engine=None):
super(OneWayShootingMoveScheme, self).__init__(network)
self.append(move_strategy.OneWayShootingStrategy(selector=selector,
ensembles=ensembles,
engine=engine))
self.append(move_strategy.OrganizeByMoveGroupStrategy())
| lgpl-2.1 |
skipzone/Illumicone | sbin/plotCpuTemperature.py | 1 | 2809 | #! /usr/bin/python
'''
----------------------------------------------------------------------------
This program plots the logged temperature of a Raspberry Pi 4 CPU
Ross Butler December 2019
----------------------------------------------------------------------------
This file is part of Illumicone.
Illumicone is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Illumicone is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Illumicone. If not, see <http://www.gnu.org/licenses/>.
'''
from datetime import datetime, timedelta
import os
import re
import matplotlib
import matplotlib.pyplot as plt
from struct import *
import sys
#from time import sleep
lastTimestamp = None
def readLogFile(logFileName):
# 2019-12-08T13:21:01-07:00 47.0 116.6
temperatureLinePattern = r'(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})-\d{2}:\d{2} (-?\d+\.?\d*) (-?\d+\.?\d*)$'
x = []
y = []
try:
with open(logFileName, 'r') as f:
for line in f:
line = line.rstrip()
m = re.search(temperatureLinePattern, line)
if m is not None:
timestamp = datetime.strptime(m.group(1), '%Y-%m-%dT%H:%M:%S')
tempF = float(m.group(3))
x.append(timestamp)
y.append(tempF)
                    continue
# If we're here then the line was unrecognized, so ignore it.
except IOError as e:
sys.stderr.write('An error occurred with log file {0}. {1}\n'.format(logFileName, e))
return x, y
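# Worked example of the parsing above (illustrative): a log line such as
# '2019-12-08T13:21:01-07:00 47.0 116.6' contributes
# datetime(2019, 12, 8, 13, 21, 1) to x and 116.6 (the Fahrenheit reading,
# regex group 3) to y.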
def doPlot(x, y):
plt.plot(x, y)
plt.gcf().autofmt_xdate()
myFmt = matplotlib.dates.DateFormatter('%d %b %H:%M')
plt.gca().xaxis.set_major_formatter(myFmt)
plt.title(r'Raspberry Pi 4 CPU Temperature')
    plt.xlabel(r'Date & Time')
plt.ylabel(r'CPU Temperature ($\degree$F)')
plt.show()
def usage():
print('Usage: plotCpuTemperature.py log_file_name')
return
def main(argv):
if len(argv) != 2:
usage()
return 2
inputFileName = argv[1]
if not os.path.exists(inputFileName):
sys.stderr.write('File {0} does not exist.\n'.format(inputFileName))
return 1
x, y = readLogFile(inputFileName)
doPlot(x, y)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 |
cmbclh/vnpy1.7 | docker/dockerTrader/ctaStrategy/strategy/strategyAtrRsi.py | 5 | 11577 | # encoding: UTF-8
"""
A trading strategy that combines the ATR and RSI indicators, suitable for
1-minute and 5-minute bars of stock index futures.
Notes:
1. The author makes no guarantee of trading profit; the strategy code is for reference only.
2. This strategy requires talib; users who have not installed it should first follow the tutorial at www.vnpy.org.
3. After importing IF0000_1min.csv into MongoDB with ctaHistoryData.py, run this file directly to backtest the strategy.
"""
import talib
import numpy as np
from ..ctaBase import *
from ..ctaTemplate import CtaTemplate
########################################################################
class AtrRsiStrategy(CtaTemplate):
"""结合ATR和RSI指标的一个分钟线交易策略"""
className = 'AtrRsiStrategy'
author = u'用Python的交易员'
    # Strategy parameters
    atrLength = 22 # window size for computing the ATR indicator
    atrMaLength = 10 # window size for the moving average of ATR
    rsiLength = 5 # window size for computing RSI
    rsiEntry = 16 # RSI entry-signal offset
    trailingPercent = 0.8 # trailing stop, in percent
    initDays = 10 # number of days of data used for initialization
    fixedSize = 1 # number of contracts per trade
    # Strategy variables
    bar = None # current bar (K-line) object
    barMinute = EMPTY_STRING # minute of the current bar
    bufferSize = 100 # size of the data buffer
    bufferCount = 0 # count of data currently buffered
    highArray = np.zeros(bufferSize) # array of bar high prices
    lowArray = np.zeros(bufferSize) # array of bar low prices
    closeArray = np.zeros(bufferSize) # array of bar close prices
    atrCount = 0 # count of ATR values currently buffered
    atrArray = np.zeros(bufferSize) # array of ATR indicator values
    atrValue = 0 # latest ATR indicator value
    atrMa = 0 # moving average of the ATR
    rsiValue = 0 # RSI indicator value
    rsiBuy = 0 # RSI threshold for opening long
    rsiSell = 0 # RSI threshold for opening short
    intraTradeHigh = 0 # highest price during the holding period (for the trailing stop)
    intraTradeLow = 0 # lowest price during the holding period (for the trailing stop)
    orderList = [] # list of issued order IDs
    # List of parameter names
paramList = ['name',
'className',
'author',
'vtSymbol',
'atrLength',
'atrMaLength',
'rsiLength',
'rsiEntry',
'trailingPercent']
    # List of variable names
varList = ['inited',
'trading',
'pos',
'atrValue',
'atrMa',
'rsiValue',
'rsiBuy',
'rsiSell']
#----------------------------------------------------------------------
def __init__(self, ctaEngine, setting):
"""Constructor"""
super(AtrRsiStrategy, self).__init__(ctaEngine, setting)
        # Note: mutable object attributes of the strategy class (usually list, dict, etc.)
        # must be re-created when the strategy is initialized; otherwise data would be
        # shared between multiple strategy instances, which could lead to subtle logic
        # errors. Declaring them at class level is optional (they could all go inside
        # __init__); it is done here mainly for readability (a coding-style choice).
#----------------------------------------------------------------------
def onInit(self):
"""初始化策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略初始化' %self.name)
# 初始化RSI入场阈值
self.rsiBuy = 50 + self.rsiEntry
self.rsiSell = 50 - self.rsiEntry
        # Load historical data and initialize strategy values by replaying it
initData = self.loadBar(self.initDays)
for bar in initData:
self.onBar(bar)
self.putEvent()
#----------------------------------------------------------------------
def onStart(self):
"""启动策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略启动' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onStop(self):
"""停止策略(必须由用户继承实现)"""
self.writeCtaLog(u'%s策略停止' %self.name)
self.putEvent()
#----------------------------------------------------------------------
def onTick(self, tick):
"""收到行情TICK推送(必须由用户继承实现)"""
# 计算K线
tickMinute = tick.datetime.minute
if tickMinute != self.barMinute:
if self.bar:
self.onBar(self.bar)
bar = CtaBarData()
bar.vtSymbol = tick.vtSymbol
bar.symbol = tick.symbol
bar.exchange = tick.exchange
bar.open = tick.lastPrice
bar.high = tick.lastPrice
bar.low = tick.lastPrice
bar.close = tick.lastPrice
bar.date = tick.date
bar.time = tick.time
            bar.datetime = tick.datetime # bar time is set to the time of its first tick
            self.bar = bar # keep a local reference to avoid an extra attribute lookup (speed)
            self.barMinute = tickMinute # update the current minute
        else: # otherwise keep accumulating into the current bar
            bar = self.bar # local alias, again for speed
bar.high = max(bar.high, tick.lastPrice)
bar.low = min(bar.low, tick.lastPrice)
bar.close = tick.lastPrice
#----------------------------------------------------------------------
def onBar(self, bar):
"""收到Bar推送(必须由用户继承实现)"""
# 撤销之前发出的尚未成交的委托(包括限价单和停止单)
for orderID in self.orderList:
self.cancelOrder(orderID)
self.orderList = []
        # Store the bar data
self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]
self.closeArray[-1] = bar.close
self.highArray[-1] = bar.high
self.lowArray[-1] = bar.low
self.bufferCount += 1
if self.bufferCount < self.bufferSize:
return
        # Compute the indicator values
self.atrValue = talib.ATR(self.highArray,
self.lowArray,
self.closeArray,
self.atrLength)[-1]
self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
self.atrArray[-1] = self.atrValue
self.atrCount += 1
if self.atrCount < self.bufferSize:
return
self.atrMa = talib.MA(self.atrArray,
self.atrMaLength)[-1]
self.rsiValue = talib.RSI(self.closeArray,
self.rsiLength)[-1]
        # Decide whether to trade
        # Currently flat (no position)
if self.pos == 0:
self.intraTradeHigh = bar.high
self.intraTradeLow = bar.low
            # ATR crossing above its moving average means short-term volatility is increasing,
            # i.e. a trending market is more likely, which is a suitable condition for opening a CTA position
if self.atrValue > self.atrMa:
                # In trending markets the RSI saturates in the overbought/oversold zone; use that as the entry signal
if self.rsiValue > self.rsiBuy:
                    # To improve the chance of a fill, place the order 5 index points beyond the market price
self.buy(bar.close+5, self.fixedSize)
elif self.rsiValue < self.rsiSell:
self.short(bar.close-5, self.fixedSize)
        # Holding a long position
elif self.pos > 0:
            # Track the highest price during the long holding period and reset the lowest
self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
self.intraTradeLow = bar.low
            # Compute the trailing stop for the long position
longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Issue a local stop order and record its ID for later cancellation
orderID = self.sell(longStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
        # Holding a short position
elif self.pos < 0:
self.intraTradeLow = min(self.intraTradeLow, bar.low)
self.intraTradeHigh = bar.high
shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
orderID = self.cover(shortStop, abs(self.pos), stop=True)
self.orderList.append(orderID)
        # Emit a status update event
self.putEvent()
#----------------------------------------------------------------------
def onOrder(self, order):
"""收到委托变化推送(必须由用户继承实现)"""
pass
#----------------------------------------------------------------------
    def onTrade(self, trade):
        """Handle trade (fill) updates."""
        # Emit a status update event
self.putEvent()
if __name__ == '__main__':
    # Allow backtesting by double-clicking this file directly
    # PyQt4 is imported to make sure matplotlib uses PyQt4 rather than PySide, which avoids initialization errors
from ctaBacktesting import *
from PyQt4 import QtCore, QtGui
    # Create the backtesting engine
    engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar data
    engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtest data
    engine.setStartDate('20120101')
    # Set product-related parameters
    engine.setSlippage(0.2)     # one tick of the index future
    engine.setRate(0.3/10000)   # commission of 0.3 per 10,000 (0.003%)
    engine.setSize(300)         # contract multiplier of the index future
    engine.setPriceTick(0.2)    # minimum price increment of the index future
    # Select the historical database to use
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # Create the strategy object in the engine
    d = {'atrLength': 11}
    engine.initStrategy(AtrRsiStrategy, d)
    # Run the backtest
    engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult()
    ## Run parameter optimization
    #setting = OptimizationSetting()                 # create a new optimization settings object
    #setting.setOptimizeTarget('capital')            # rank the optimization results by net profit
    #setting.addParameter('atrLength', 12, 20, 2)    # first optimized parameter atrLength: start 12, end 20, step 2
    #setting.addParameter('atrMa', 20, 30, 5)        # second optimized parameter atrMa: start 20, end 30, step 5
    #setting.addParameter('rsiLength', 5)            # add a parameter with a fixed value
    ## Benchmark environment: i7-3770 @ 3.4 GHz, 8 cores, 16 GB RAM, Windows 7 Professional
    ## Other programs were running during the test, so the timings are indicative only
    #import time
    #start = time.time()
    ## Run the single-process optimization; results are printed automatically. Elapsed: 359 seconds
    #engine.runOptimization(AtrRsiStrategy, setting)
    ## Multi-process optimization. Elapsed: 89 seconds
    ##engine.runParallelOptimization(AtrRsiStrategy, setting)
    #print u'Elapsed: %s' %(time.time()-start) | mit |
gavrieltal/opencog | opencog/python/spatiotemporal/temporal_events/animation.py | 34 | 4896 | from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation
__author__ = 'keyvan'
x_axis = xrange(13)
zeros_13 = zeros(13)
class Animation(object):
def __init__(self, event_a, event_b, event_c, plt=plt):
self.event_a = event_a
self.event_c = event_c
self.event_b_length_beginning = event_b.beginning - event_b.a
self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
self.event_b_length_total = event_b.b - event_b.ending
self.plt = plt
self.fig = plt.figure(1)
self.ax_a_b = self.fig.add_subplot(4, 1, 1)
self.ax_b_c = self.fig.add_subplot(4, 1, 2)
self.ax_a_c = self.fig.add_subplot(4, 1, 3)
self.ax_relations = self.fig.add_subplot(4, 1, 4)
self.ax_a_b.set_xlim(0, 13)
self.ax_a_b.set_ylim(0, 1)
self.ax_b_c.set_xlim(0, 13)
self.ax_b_c.set_ylim(0, 1)
self.ax_a_c.set_xlim(0, 13)
self.ax_a_c.set_ylim(0, 1)
self.rects_a_b = self.ax_a_b.bar(x_axis, zeros_13)
self.rects_b_c = self.ax_b_c.bar(x_axis, zeros_13)
self.rects_a_c = self.ax_a_c.bar(x_axis, zeros_13)
self.line_a = Line2D([], [])
self.line_b = Line2D([], [])
self.line_c = Line2D([], [])
self.ax_relations.add_line(self.line_a)
self.ax_relations.add_line(self.line_b)
self.ax_relations.add_line(self.line_c)
a = min(event_a.a, event_c.a) - self.event_b_length_total
b = max(event_a.b, event_c.b)
self.ax_relations.set_xlim(a, b + self.event_b_length_total)
self.ax_relations.set_ylim(0, 1.1)
# self.interval = TimeInterval(a, b, 150)
self.interval = TimeInterval(a, b, 2)
self.ax_a_b.xaxis.set_minor_formatter(self.ax_a_b.xaxis.get_major_formatter())
self.ax_a_b.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_b.xaxis.set_ticklabels('poDedOP')
self.ax_a_b.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_b_c.xaxis.set_minor_formatter(self.ax_b_c.xaxis.get_major_formatter())
self.ax_b_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_b_c.xaxis.set_ticklabels('poDedOP')
self.ax_b_c.xaxis.set_ticklabels('mFsSfM', minor=True)
self.ax_a_c.xaxis.set_minor_formatter(self.ax_a_c.xaxis.get_major_formatter())
self.ax_a_c.xaxis.set_minor_locator(AutoMinorLocator(2))
self.ax_a_c.xaxis.set_ticklabels('poDedOP')
self.ax_a_c.xaxis.set_ticklabels('mFsSfM', minor=True)
def init(self):
artists = []
self.line_a.set_data(self.event_a, self.event_a.membership_function)
self.line_b.set_data([], [])
self.line_c.set_data(self.event_c, self.event_c.membership_function)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
for rect, h in zip(self.rects_a_b, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, zeros_13):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_a_c, (self.event_a * self.event_c).to_list()):
rect.set_height(h)
artists.append(rect)
return artists
def animate(self, t):
interval = self.interval
B = TemporalEventTrapezium(interval[t], interval[t] + self.event_b_length_total,
interval[t] + self.event_b_length_beginning,
interval[t] + self.event_b_length_middle)
plt.figure()
B.plot().show()
a_b = (self.event_a * B).to_list()
b_c = (B * self.event_c).to_list()
self.line_b.set_data(B, B.membership_function)
artists = []
for rect, h in zip(self.rects_a_b, a_b):
rect.set_height(h)
artists.append(rect)
for rect, h in zip(self.rects_b_c, b_c):
rect.set_height(h)
artists.append(rect)
artists.append(self.line_a)
artists.append(self.line_b)
artists.append(self.line_c)
return artists
def show(self):
fr = len(self.interval) - 1
anim = animation.FuncAnimation(self.fig, self.animate, init_func=self.init,
frames=fr, interval=fr, blit=True)
self.plt.show()
if __name__ == '__main__':
anim = Animation(TemporalEventTrapezium(4, 8, 5, 7),
TemporalEventTrapezium(0, 10, 6, 9),
TemporalEventTrapezium(0.5, 11, 1, 3))
# anim.show()
| agpl-3.0 |
fzheng/codejam | lib/python2.7/site-packages/ipykernel/inprocess/ipkernel.py | 3 | 6604 | """An in-process kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
import logging
import sys
from IPython.core.interactiveshell import InteractiveShellABC
from ipykernel.jsonutil import json_clean
from traitlets import Any, Enum, Instance, List, Type
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from .socket import DummySocket
from ..iostream import OutStream, BackgroundSocket, IOPubThread
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class InProcessKernel(IPythonKernel):
#-------------------------------------------------------------------------
# InProcessKernel interface
#-------------------------------------------------------------------------
# The frontends connected to this kernel.
frontends = List(
Instance('ipykernel.inprocess.client.InProcessKernelClient',
allow_none=True)
)
# The GUI environment that the kernel is running under. This need not be
# specified for the normal operation for the kernel, but is required for
# IPython's GUI support (including pylab). The default is 'inline' because
# it is safe under all GUI toolkits.
gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
default_value='inline')
raw_input_str = Any()
stdout = Any()
stderr = Any()
#-------------------------------------------------------------------------
# Kernel interface
#-------------------------------------------------------------------------
shell_class = Type(allow_none=True)
shell_streams = List()
control_stream = Any()
_underlying_iopub_socket = Instance(DummySocket, ())
iopub_thread = Instance(IOPubThread)
def _iopub_thread_default(self):
thread = IOPubThread(self._underlying_iopub_socket)
thread.start()
return thread
iopub_socket = Instance(BackgroundSocket)
def _iopub_socket_default(self):
return self.iopub_thread.background_socket
stdin_socket = Instance(DummySocket, ())
def __init__(self, **traits):
super(InProcessKernel, self).__init__(**traits)
self._underlying_iopub_socket.on_trait_change(self._io_dispatch, 'message_sent')
self.shell.kernel = self
def execute_request(self, stream, ident, parent):
""" Override for temporary IO redirection. """
with self._redirected_io():
super(InProcessKernel, self).execute_request(stream, ident, parent)
def start(self):
""" Override registration of dispatchers for streams. """
self.shell.exit_now = False
def _abort_queue(self, stream):
""" The in-process kernel doesn't abort requests. """
pass
def _input_request(self, prompt, ident, parent, password=False):
# Flush output before making the request.
self.raw_input_str = None
sys.stderr.flush()
sys.stdout.flush()
# Send the input request.
content = json_clean(dict(prompt=prompt, password=password))
msg = self.session.msg(u'input_request', content, parent)
for frontend in self.frontends:
if frontend.session.session == parent['header']['session']:
frontend.stdin_channel.call_handlers(msg)
break
else:
logging.error('No frontend found for raw_input request')
return str()
# Await a response.
while self.raw_input_str is None:
frontend.stdin_channel.process_events()
return self.raw_input_str
#-------------------------------------------------------------------------
# Protected interface
#-------------------------------------------------------------------------
@contextmanager
def _redirected_io(self):
""" Temporarily redirect IO to the kernel.
"""
sys_stdout, sys_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = self.stdout, self.stderr
yield
sys.stdout, sys.stderr = sys_stdout, sys_stderr
#------ Trait change handlers --------------------------------------------
def _io_dispatch(self):
""" Called when a message is sent to the IO socket.
"""
ident, msg = self.session.recv(self.iopub_socket, copy=False)
for frontend in self.frontends:
frontend.iopub_channel.call_handlers(msg)
#------ Trait initializers -----------------------------------------------
def _log_default(self):
return logging.getLogger(__name__)
def _session_default(self):
from jupyter_client.session import Session
return Session(parent=self, key=b'')
def _shell_class_default(self):
return InProcessInteractiveShell
def _stdout_default(self):
return OutStream(self.session, self.iopub_thread, u'stdout')
def _stderr_default(self):
return OutStream(self.session, self.iopub_thread, u'stderr')
#-----------------------------------------------------------------------------
# Interactive shell subclass
#-----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
allow_none=True)
#-------------------------------------------------------------------------
# InteractiveShell interface
#-------------------------------------------------------------------------
def enable_gui(self, gui=None):
"""Enable GUI integration for the kernel."""
from ipykernel.eventloops import enable_gui
if not gui:
gui = self.kernel.gui
return enable_gui(gui, kernel=self.kernel)
def enable_matplotlib(self, gui=None):
"""Enable matplotlib integration for the kernel."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_matplotlib(gui)
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime."""
if not gui:
gui = self.kernel.gui
return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all,
welcome_message)
InteractiveShellABC.register(InProcessInteractiveShell)
| mit |
artmusic0/theano-learning.part02 | Training_data/rd_file_resize_rand_gz.py | 1 | 4053 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 24 04:03:19 2015
@author: winpython
"""
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import cPickle, pickle
import gzip
thelist = np.array(['8_7', '1_12', '2_8', '3_15', '8_16', '7_1', '0_3', '1_0', '9_18', '3_3', '5_0',
'7_5', '7_3', '2_18', '6_4', '0_11', '0_12', '5_1', '0_19', '2_10', '8_2', '9_19',
'4_5', '4_10', '7_9', '9_13', '8_14', '5_12', '3_1', '6_1', '4_13', '7_4', '7_11',
'9_11', '5_4', '4_19', '5_16', '5_19', '7_6', '6_13', '8_3', '1_8', '3_19', '3_8',
'8_1', '1_19', '1_14', '7_16', '8_0', '8_6', '2_11', '8_13', '7_13', '7_19', '9_9',
'4_1', '1_11', '8_17', '3_14', '9_14', '0_16', '4_6', '5_3', '6_12', '2_14', '5_17',
'7_7', '7_15', '1_1', '4_7', '0_14', '3_6', '1_5', '1_15', '6_19', '9_3', '3_7',
'8_9', '3_10', '5_9', '1_10', '4_3', '0_2', '9_10', '2_0', '0_0', '0_10', '3_11',
'0_8', '8_5', '3_16', '8_8', '9_17', '2_12', '0_1', '4_8', '9_6', '0_4', '9_4',
'6_2', '9_16', '1_3', '7_14', '4_0', '9_15', '0_6', '9_0', '2_5', '4_16', '2_13',
'5_14', '8_15', '1_7', '1_16', '1_2', '1_4', '2_17', '8_19', '5_13', '6_18', '2_16',
'6_16', '0_13', '4_17', '5_8', '4_4', '5_15', '3_17', '6_15', '3_4', '9_12', '4_15',
'4_9', '6_8', '0_9', '1_6', '5_11', '5_7', '4_18', '2_3', '5_6', '4_11', '2_4',
'0_17', '7_17', '1_18', '3_13', '6_3', '0_5', '2_1', '3_2', '1_13', '2_9', '4_14',
'6_14', '7_10', '5_2', '8_12', '2_19', '6_5', '9_7', '9_8', '9_1', '6_6', '1_17',
'7_2', '8_4', '9_2', '5_5', '8_18', '6_11', '3_5', '4_12', '2_7', '3_18', '4_2',
'6_9', '3_0', '3_12', '1_9', '8_10', '7_8', '7_18', '6_17', '7_12', '9_5', '3_9',
'0_7', '8_11', '6_0', '6_7', '2_6', '5_10', '5_18', '0_15', '0_18', '6_10', '7_0',
'2_15', '2_2'])
final_output = np.zeros((200,784),dtype=np.float32)
final_label = np.array([8, 1, 2, 3, 8, 7, 0, 1, 9, 3, 5, 7, 7, 2, 6, 0, 0, 5, 0, 2, 8, 9, 4, 4, 7, 9, 8, 5, 3, 6, 4, 7, 7, 9, 5, 4, 5, 5, 7, 6, 8, 1, 3, 3, 8, 1, 1, 7, 8, 8, 2, 8, 7, 7, 9, 4, 1, 8, 3, 9, 0, 4, 5, 6, 2, 5, 7, 7, 1, 4, 0, 3, 1, 1, 6, 9, 3, 8, 3, 5, 1, 4, 0, 9, 2, 0, 0, 3, 0, 8, 3, 8, 9, 2, 0, 4, 9, 0, 9, 6, 9, 1, 7, 4, 9, 0, 9, 2, 4, 2, 5, 8, 1, 1, 1, 1, 2, 8, 5, 6, 2, 6, 0, 4, 5, 4, 5, 3, 6, 3, 9, 4, 4, 6, 0, 1, 5, 5, 4, 2, 5, 4, 2, 0, 7, 1, 3, 6, 0, 2, 3, 1, 2, 4, 6, 7, 5, 8, 2, 6, 9, 9, 9, 6, 1, 7, 8, 9, 5, 8, 6, 3, 4, 2, 3, 4, 6, 3, 3, 1, 8, 7, 7, 6, 7, 9, 3, 0, 8, 6, 6, 2, 5, 5, 0, 0, 6, 7, 2, 2],dtype=np.int64)
for i in range(200):
print "reading", i, "..."
pil_im = Image.open( thelist[i] + ".jpg" ).convert('L')
#imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((28, 28), Image.BILINEAR )
pil_im = np.array(pil_im)
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
#print("test")
#print(pil_im)
note = 0
for j in range(28):
for k in range(28):
            final_output[i][note] = (255 - pil_im[j][k])/255.  # invert and normalize the pixel value to [0, 1]
note += 1
print " in ", note, "...",
print " "
print "Finished Picture..."
print "Starting label"
print "Finished Labeling..."
print "Starting cpickle"
outputandlabel = final_output, final_label
f = gzip.open("training_data_200v1.pkl.gz", 'wb')
cPickle.dump(outputandlabel, f)
f.close()
print "Finished cPickle..."
print "\ ! congradulation ! /"
#f = open("pic1.txt", "r")
'''
imshow(np.asarray(pil_im)) # before resize
pil_im = pil_im.resize((28, 28), Image.BILINEAR )
pil_im = np.array(pil_im)
#print(np.array(pil_im))
#imshow(np.asarray(pil_im))
fig = plt.figure()
plotwindow = fig.add_subplot()
plt.imshow(pil_im, cmap='gray')
plt.show()
print("test")
print(pil_im)
''' | gpl-3.0 |
microelly2/reconstruction | reconstruction/houghlines.py | 1 | 6939 | # -*- coding: utf-8 -*-
#-------------------------------------------------
#-- hough line finder
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
import cv2
import numpy as np
from matplotlib import pyplot as plt
import PySide
from PySide import QtCore, QtGui, QtSvg
import Part,Draft
f='/home/thomas/Dokumente/freecad_buch/b186_image_processing_opencv/P1210191.JPG'
# f='/home/thomas/Dokumente/freecad_buch/b186_image_processing_opencv/bn_454.png'
scaler=1000
def fclinev2(x1,y1,x2,y2):
v=FreeCAD.Vector(float(x1)*scaler,float(y1)*scaler,0)
v2=FreeCAD.Vector(float(x2)*scaler,float(y2)*scaler,0)
l=Part.makeLine(v,v2)
return l
def main(filename,canny1=100,canny2=200,rho=1,theta=1, threshold=10, minLineLength =25, maxLineGap =10,
showimage=False,showimagewithlines=False,newDocument=True):
# def main(f):
f=filename
im = cv2.imread(f)
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,canny1,canny2)
xsize=len(im[0])
ysize=len(im)
#image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]])
lines = cv2.HoughLinesP(edges,1,np.pi/180*theta,threshold, minLineLength = minLineLength, maxLineGap = maxLineGap)
# lines = cv2.HoughLinesP(edges,1,np.pi/180,10, minLineLength = 25, maxLineGap = 10)
#lines = cv2.HoughLinesP(edges,1,np.pi/2,2)[0]
k=0
fclines=[]
for l in lines:
k += 1
[[x1,y1,x2,y2]] = l
fl=fclinev2(x1,-y1,x2,-y2)
fclines.append(fl)
#print (x1,y1,x2,y2)
a=cv2.line(im,(x1,y1),(x2,y2),(0,255,255),2)
c=Part.makeCompound(fclines)
c.Placement.Base=FreeCAD.Vector(-xsize/2*scaler,ysize/2*scaler,0)
if newDocument:
d=App.newDocument("HoughLines")
# App.setActiveDocument("Unnamed1")
# App.ActiveDocument=d
# Gui.ActiveDocument=Gui.getDocument("Unnamed1")
Part.show(c)
cv2.imwrite('/tmp/out.png',im)
import Image, ImageGui
#ImageGui.open(unicode("/tmp/out.png","utf-8"))
if showimage:
fimg=App.activeDocument().addObject('Image::ImagePlane','Image 2')
fimg.Label=f
fimg.ImageFile = f
fimg.XSize = xsize*scaler
fimg.YSize = ysize*scaler
fimg.Placement.Base.z=-5
if showimagewithlines:
fimg=App.activeDocument().addObject('Image::ImagePlane','Image with Houghlines')
fimg.ImageFile = '/tmp/out.png'
fimg.XSize = xsize*scaler
fimg.YSize = ysize*scaler
fimg.Placement.Base.z=-10
FreeCADGui.SendMsgToActiveView("ViewFit")
print ("lines:",k)
s6='''
VerticalLayout:
id:'main'
setFixedHeight: 900
setFixedWidth: 730
setFixedWidth: 700
move: PySide.QtCore.QPoint(3000,100)
QtGui.QLabel:
setText:"*** C O M P U T E H O U G H L I N E S F O R A N I M A G E ***"
QtGui.QLabel:
QtGui.QLineEdit:
setText:"/home/thomas/Bilder/houghlines/P1210172.JPG"
id: 'bl'
QtGui.QPushButton:
setText: "Get Image Filename"
clicked.connect: app.getfn
QtGui.QLabel:
setText:"Scale"
QtGui.QLabel:
QtGui.QSlider:
id:'scaler'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 2000
setTickInterval: 100
setValue: 500
setTickPosition: QtGui.QSlider.TicksBelow
QtGui.QLabel:
# QtGui.QCheckBox:
# id: 'elevation'
# setText: 'Process Elevation Data'
QtGui.QLabel:
setText:"C a n n y E d g e gradient 0 - 400"
QtGui.QLabel:
QtGui.QSlider:
id:'canny1'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 400
setTickInterval: 10
setValue: 100
setTickPosition: QtGui.QSlider.TicksBelow
QtGui.QSlider:
id:'canny2'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 400
setTickInterval: 10
setValue: 200
QtGui.QLabel:
QtGui.QLabel:
setText:"p r o b a b i l i s t i c H o u g h t r a n s f o r m "
QtGui.QLabel:
QtGui.QLabel:
setText:"rho = Distance resolution of the accumulator in pixels"
QtGui.QSlider:
id:'rho'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 100
setTickInterval: 10
setValue: 25
setTickPosition: QtGui.QSlider.TicksBothSides
QtGui.QLabel:
setText:"theta = Angle resolution of the accumulator in Degree."
QtGui.QSlider:
id:'theta'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 180
setTickInterval: 10
setValue: 1
setTickPosition: QtGui.QSlider.TicksBothSides
QtGui.QLabel:
setText:"threshold = Accumulator threshold parameter. Only those lines are returned that get enough votes."
QtGui.QSlider:
id:'threshold'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 100
setTickInterval: 10
setValue: 10
setTickPosition: QtGui.QSlider.TicksBothSides
QtGui.QLabel:
setText:"minLineLength = Minimum line length. Line segments shorter than that are rejected."
QtGui.QSlider:
id:'minLineLength'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 100
setTickInterval: 10
setValue: 25
setTickPosition: QtGui.QSlider.TicksBothSides
QtGui.QLabel:
setText:"maxLineGap = Maximum allowed gap between points on the same line to link them."
QtGui.QSlider:
id:'maxLineGap'
setOrientation: PySide.QtCore.Qt.Orientation.Horizontal
setMinimum: 0
setMaximum: 100
setTickInterval: 10
setValue: 10
setTickPosition: QtGui.QSlider.TicksBothSides
QtGui.QLabel:
QtGui.QLabel:
setText:"O U T P U T:"
id: "status"
QtGui.QCheckBox:
id: 'showimage'
setText: 'Show Image'
QtGui.QCheckBox:
id: 'showimagewithlines'
setText: 'Show Image with Lines'
QtGui.QCheckBox:
id: 'newDocument'
setText: 'create new Document'
setChecked: True
QtGui.QPushButton:
setText: "Run values"
clicked.connect: app.run
'''
import FreeCAD,FreeCADGui
class MyApp(object):
def run(self):
print "run app"
filename=self.root.ids['bl'].text()
#main(s.text())
main(
filename,
self.root.ids['canny1'].value(),
self.root.ids['canny2'].value(),
self.root.ids['rho'].value(),
self.root.ids['theta'].value(),
self.root.ids['threshold'].value(),
self.root.ids['minLineLength'].value(),
self.root.ids['maxLineGap'].value(),
self.root.ids['showimage'].isChecked(),
self.root.ids['showimagewithlines'].isChecked(),
self.root.ids['newDocument'].isChecked(),
)
# main(filename,canny1=100,canny2=200,rho=1,theta=1, threshold=10, minLineLength =25, maxLineGap =10)
def getfn(self):
fileName = QtGui.QFileDialog.getOpenFileName(None,u"Open File",u"/home/thomas/Bilder/houghlines",u"Images (*.png *.xpm *.jpg)");
print fileName
s=self.root.ids['bl']
s.setText(fileName[0])
#def findHoughLines():
def run():
print "huhu"
app=MyApp()
import geodat
import geodat.miki as miki
reload(miki)
miki=miki.Miki()
miki.app=app
app.root=miki
miki.parse2(s6)
miki.run(s6)
#findHoughLines()
| lgpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_Rot_cont/Geneva_Rot_cont_age6/Optical2.py | 33 | 7437 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
line = [56, #AR 4 4740
58, #4861
59, #O III 4959
60, #O 3 5007
61, #N 1 5200
63, #O 1 5577
64, #N 2 5755
65, #HE 1 5876
66, #O 1 6300
67, #S 3 6312
68, #O 1 6363
69, #H 1 6563
70, #N 2 6584
71, #S II 6716
72, #S 2 6720
73] #S II 6731
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Optical_lines_cntd.pdf')
plt.clf()
| gpl-2.0 |
gizela/gizela | gizela/pyplot/PlotPoint.py | 1 | 7313 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <[email protected]>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id: PlotPoint.py 119 2011-01-11 23:44:34Z tomaskubin $
from gizela.util.Error import Error
class PlotPointError(Error): pass
class PlotPoint(object):
'''class for plotting of geodetic points with error ellipse
'''
@classmethod
def plot_point_xy(cls, figure, x, y, style):
"plots point - position with marker"
if figure.is_swap_xy():
y, x = x, y
line, = figure.gca().plot([x], [y])
figure.set_style(style, line)
@classmethod
def plot_point_dot(cls, figure, point, style):
cls.plot_point_xy(figure, point.x, point.y, style)
@classmethod
def plot_point_x(cls, figure, point, x, style):
cls.plot_point_xy(figure, x, point.x, style)
@classmethod
def plot_point_y(cls, figure, point, x, style):
cls.plot_point_xy(figure, x, point.y, style)
@classmethod
def plot_point_z(cls, figure, point, x, style):
cls.plot_point_xy(figure, x, point.z, style)
@classmethod
def plot_label_xy(cls, figure, id, x, y, style):
"plot point id"
if figure.is_swap_xy():
y, x = x, y
text = figure.gca().text(x, y, id,
transform=figure.get_label_tran())
figure.set_style(style, text)
@classmethod
def plot_point_label(cls, figure, point, style):
cls.plot_label_xy(figure, point.id, point.x, point.y, style)
@classmethod
def plot_error_ellipse_xy(cls, figure, x, y, abom, style):
"""
plots standard error ellipse
x, y : position of center of ellipse
abom : a, b, om parameters of ellipse
method do not handle axes orientation (self.figure.axesXY)
"""
from matplotlib.patches import Ellipse
from math import pi
a, b, om = abom
#print "swapXY:", figure.is_swap_xy()
if figure.is_swap_xy():
y, x = x, y
om = pi/2 - om
#om = om - pi/2
ell = Ellipse((x, y), 2*a, 2*b, om*180.0/pi) #?
#transform=self.axes.transData + self.ellTrans)
figure.set_style(style, ell)
ell.set_clip_box(figure.gca().bbox) # cut edges outside axes box?
figure.gca().add_artist(ell)
@classmethod
def plot_point_error_ellipse(cls, figure, point, ellScale, style):
abom = point.errEll
abom[0] *= ellScale
abom[1] *= ellScale
import sys
print >>sys.stderr, point.id, ":s_x s_y:", point.stdevx, point.stdevy
print >>sys.stderr, point.id, ":xi,yi,zi:", point.xi, point.yi, point.zi
print >>sys.stderr, point.id, ":covmat:", point.covmat
print >>sys.stderr, point.id, ":covmat:", point.covmat.data
print >>sys.stderr, point.id, ":ell:", point.errEll
print >>sys.stderr, point.id, ":scale:", ellScale
print >>sys.stderr, point.id, ":ell_scaled:", abom
cls.plot_error_ellipse_xy(figure, point.x, point.y, abom, style)
@classmethod
def _compute_segments(cls, x, y):
"returns segments of line divided by Nones"
# find line segments for Nones
xx, yy = [], [] # lines without None(s)
xn, yn = [], [] # line with None(s)
xl, yl = None, None # last not None coordinate
new = True # new segment of line?
for xi, yi in zip(x, y):
if xi == None or yi == None:
new = True
else:
if new:
# start new line
xx.append([xi])
yy.append([yi])
if xl != None:
# add line for Nones
xn.append([xl, xi])
yn.append([yl, yi])
else:
# add next point to line
xx[-1].append(xi)
yy[-1].append(yi)
new = False
xl, yl = xi, yi # save xi as last not None values
return xx, xn, yy, yn
@classmethod
def plot_vector_xy(cls, figure, x, y, style, styleNone):
"""
plot vector
just line with specific style
style: style for line
styleNone: style for connections with None values
x, y: lists of coordinates
"""
if figure.is_swap_xy():
x, y = y, x
#from matplotlib.patches import FancyArrowPatch, ArrowStyle
#arr = Arrow(x, y, dx, dy)
#figure.axes.add_artist(arr)
xx, xn, yy, yn = cls._compute_segments(x, y)
#yy, yn = cls._compute_segments(y)
# draw lines
for x, y in zip(xx, yy):
line, = figure.gca().plot(x, y)
figure.set_style(style, line)
# draw lines for Nones
for x, y in zip(xn, yn):
line, = figure.gca().plot(x, y)
figure.set_style(styleNone, line)
@classmethod
def plot_vector(cls, figure, pointList, style, styleNone):
x = []; y = []
for p in pointList:
x.append(p.x)
y.append(p.y)
cls.plot_vector_xy(figure, x, y, style, styleNone)
@classmethod
def plot_y_stdev(cls, figure, x, y, stdev, style):
"plots 1sigma interval for standard deviation along y axis"
#print "stdev_all:", stdev
#print "errScale:", figure.errScale
#print "confScale:", figure.stdev.get_conf_scale_1d()
# line
line, = figure.gca().plot([x, x],
[y - stdev, y + stdev])
line.set_solid_capstyle("butt")
figure.set_style(style, line)
if __name__ == "__main__":
from gizela.pyplot.FigureLayoutBase import FigureLayoutBase
fig = FigureLayoutBase()
from gizela.data.PointCartCovMat import PointCartCovMat
p = PointCartCovMat(id="A", x=0, y=0)
p.var = (9, 4)
print p.errEll
style = {}
PlotPoint.plot_point_xy(fig, p.x, p.y, style)
PlotPoint.plot_label_xy(fig, p.id, p.x, p.y, style)
PlotPoint.plot_error_ellipse_xy(fig, p.x, p.y, p.errEll, style)
PlotPoint.plot_vector_xy(fig, [p.x, p.x + 3, p.x + 1.0],
[p.y, p.y - 2, p.y - 1.5], style, style)
fig.gca().axis([-4,4,-4,4])
fig.set_aspect_equal()
#fig.show_()
ori = ("en", "wn", "ne", "nw", "es", "ws", "se", "sw")
for o in ori:
fig = FigureLayoutBase(axesOri=o)
from math import pi
fig.config["errorEllipseStyle"]["alpha"] = 0.2
PlotPoint.plot_error_ellipse_xy(fig, x=0.5, y=0.2,
abom=(0.3,0.1,20*pi/200),
style="errorEllipseStyle")
fig.config["errorEllipseStyle"]["alpha"] = 1
PlotPoint.plot_error_ellipse_xy(fig, x=0.5, y=0.2,
abom=(0.3,0.1,0),
style="errorEllipseStyle")
print fig.get_axes_ori()
print fig.is_swap_xy()
fig.show_()
| gpl-3.0 |
bzero/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
CagriLatifoglu/cagrilatifoglu.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
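#
# A minimal illustrative row (hypothetical values; columns are tab-separated) for this layout:
#
#   pub_date    title         venue      excerpt            citation                                       site_url            paper_url                      url_slug
#   2015-10-01  Sample Paper  Journal 1  One-line summary.  Your Name. (2015). "Sample Paper." Journal 1.  http://example.com  http://example.com/sample.pdf  sample-paper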
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation"), simply comment out or delete the corresponding line below.
# In[5]:
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.paper_url)) > 5:
md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
| mit |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/lines_bars_and_markers/scatter_hist.py | 1 | 2113 | """
============
Scatter Hist
============
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Fixing random state for reproducibility
np.random.seed(19680801)
# the random data
x = np.random.randn(1000)
y = np.random.randn(1000)
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
plt.figure(1, figsize=(8, 8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axScatter.scatter(x, y)
# now determine nice limits by hand:
binwidth = 0.25
xymax = max(np.max(np.abs(x)), np.max(np.abs(y)))
lim = (int(xymax/binwidth) + 1) * binwidth
axScatter.set_xlim((-lim, lim))
axScatter.set_ylim((-lim, lim))
bins = np.arange(-lim, lim + binwidth, binwidth)
axHistx.hist(x, bins=bins)
axHisty.hist(y, bins=bins, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
pltshow(plt)
| mit |
ZhangJUJU/TransferLearning | DANN/utils.py | 1 | 3653 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
# Model construction utilities below adapted from
# https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def shuffle_aligned_list(data):
"""Shuffle arrays in a list by shuffling each array identically."""
num = data[0].shape[0]
p = np.random.permutation(num)
return [d[p] for d in data]
def batch_generator(data, batch_size, shuffle=True):
"""Generate batches of data.
Given a list of array-like objects, generate batches of a given
size by yielding a list of array-like objects corresponding to the
same slice of each input.
"""
if shuffle:
data = shuffle_aligned_list(data)
batch_count = 0
while True:
if batch_count * batch_size + batch_size >= len(data[0]):
batch_count = 0
if shuffle:
data = shuffle_aligned_list(data)
start = batch_count * batch_size
end = start + batch_size
batch_count += 1
yield [d[start:end] for d in data]
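# Example usage sketch (hypothetical arrays): batches keep images and labels aligned.
# gen = batch_generator([X_train, y_train], batch_size=64)
# X_batch, y_batch = next(gen)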
def imshow_grid(images, shape=[2, 8]):
"""Plot images in a grid of a given shape."""
fig = plt.figure(1)
grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)
size = shape[0] * shape[1]
for i in range(size):
grid[i].axis('off')
grid[i].imshow(images[i]) # The AxesGrid object work as a list of axes.
plt.show()
def plot_embedding(X, y, d, title=None):
""" Plot an embedding X with the class label y colored by the domain d. """
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
# Plot colors numbers
plt.figure(figsize=(10, 10))
ax = plt.subplot(111)
for i in range(X.shape[0]):
# plot colored number
plt.text(X[i, 0], X[i, 1], str(y[i]),
color=plt.cm.bwr(d[i] / 1.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
plt.show()
def dense_to_one_hot(labels_dense, num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + np.int32(labels_dense).ravel()] = 1
return labels_one_hot
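# Worked example: dense_to_one_hot(np.array([0, 2]), 3) returns
# [[1, 0, 0],
#  [0, 0, 1]]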
def imshow_grid(images, shape=[2, 8]):
""" Plot images in a grid of a given shape. """
from mpl_toolkits.axes_grid1 import ImageGrid
fig = plt.figure(1)
grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)
size = shape[0] * shape[1]
for i in range(size):
grid[i].axis('off')
grid[i].imshow(images[i]) # The AxisGrid object work as a list of axes
plt.show() | mit |
mfjb/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
ozak/gis-raster-tools | gisrastertools/gisrastertools.py | 1 | 7129 | #!/usr/bin/env python
# coding: utf-8
'''
Copyright (C) 2014 Ömer Özak
This program defines functions that are useful for working with GIS data
Usage:
import gisrastertools
or
from gisrastertools import *
======================================================
Author: Ömer Özak, 2013--2014 (ozak at smu.edu)
Website: http://omerozak.com
GitHub: https://github.com/ozak/
======================================================
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
import numpy as np
import pandas as pd
from osgeo import gdal, gdalnumeric, ogr, osr
from gdalconst import *
from skimage.measure import block_reduce
# Function to read the original file's projection:
def get_geo_info(FileName):
''' Gets information from a Raster data set
'''
SourceDS = gdal.Open(FileName, GA_ReadOnly)
NDV = SourceDS.GetRasterBand(1).GetNoDataValue()
xsize = SourceDS.RasterXSize
ysize = SourceDS.RasterYSize
GeoT = SourceDS.GetGeoTransform()
Projection = osr.SpatialReference()
Projection.ImportFromWkt(SourceDS.GetProjectionRef())
DataType = SourceDS.GetRasterBand(1).DataType
DataType = gdal.GetDataTypeName(DataType)
return NDV, xsize, ysize, GeoT, Projection, DataType
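# Alias kept for the CamelCase name used in the docstrings and in the calls below
# (e.g. inside align_rasters and load_tiff), so those references resolve correctly.
GetGeoInfo = get_geo_info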
# Function to map location in pixel of raster array
def map_pixel(point_x, point_y, cellx, celly, xmin, ymax):
'''
Usage: map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax)
where:
xmin is leftmost X coordinate in system
ymax is topmost Y coordinate in system
Example:
raster = HMISea.tif'
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(raster)
col, row = map_pixel(x,y,GeoT[1],GeoT[-1], GeoT[0],GeoT[3])
'''
point_x=np.array(point_x)
point_y=np.array(point_y)
col = np.around((point_x - xmin) / cellx).astype(int)
row = np.around((point_y - ymax) / celly).astype(int)
return col,row
# Aggregate raster to higher resolution using sums
def aggregate(raster,NDV,block_size):
'''
Aggregate raster to smaller resolution, by adding cells.
Usage:
aggregate(raster,NDV,block_size)
where
raster is a Numpy array created by importing the raster (e.g. GeoTiff)
NDV is the NoData Value for the raster (can be read using the GetGeoInfo function)
block_size is a duple of factors by which the raster will be shrinked
Example:
raster = HMISea.tif'
NDV, xsize, ysize, GeoT, Projection, DataType = GetGeoInfo(raster)
costs = load_tiff(raster)
costs2=aggregate(costs,NDV,(10,10))
'''
raster2=np.where(raster==NDV,0,raster)
raster3=block_reduce(raster2,block_size,func=np.sum)
raster2=np.where(raster==NDV,NDV,0)
raster4=block_reduce(raster2,block_size,func=np.sum)
raster2=np.where(raster4<0,NDV,raster3)
return raster2
# Function to write a new file.
def create_geotiff(Name, Array, driver, NDV, xsize, ysize, GeoT, Projection, DataType):
'''
Creates new GeoTiff from array
'''
if type(DataType)!=np.int:
if DataType.startswith('gdal.GDT_')==False:
DataType=eval('gdal.GDT_'+DataType)
NewFileName = Name+'.tif'
# Set nans to the original No Data Value
Array[np.isnan(Array)] = NDV
# Set up the dataset
DataSet = driver.Create( NewFileName, xsize, ysize, 1, DataType)
# the '1' is for band 1.
DataSet.SetGeoTransform(GeoT)
DataSet.SetProjection( Projection.ExportToWkt() )
# Write the array
DataSet.GetRasterBand(1).WriteArray( Array )
DataSet.GetRasterBand(1).SetNoDataValue(NDV)
return NewFileName
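# Example usage sketch (hypothetical filenames), following the pattern of the helpers above:
# NDV, xsize, ysize, GeoT, Projection, DataType = get_geo_info('HMISea.tif')
# driver = gdal.GetDriverByName('GTiff')
# create_geotiff('HMISea_copy', load_tiff('HMISea.tif'), driver, NDV, xsize, ysize, GeoT, Projection, DataType)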
# Function to aggregate and align rasters
def align_rasters(raster,alignraster,how=np.mean,cxsize=None,cysize=None,masked=False):
'''
Align two rasters so that data overlaps by geographical location
Usage: (alignedraster_o, alignedraster_a, GeoT_a) = AlignRasters(raster, alignraster, how=np.mean)
where
raster: string with location of raster to be aligned
alignraster: string with location of raster to which raster will be aligned
how: function used to aggregate cells (if the rasters have different sizes)
It is assumed that both rasters have the same size
'''
NDV1, xsize1, ysize1, GeoT1, Projection1, DataType1=GetGeoInfo(raster)
NDV2, xsize2, ysize2, GeoT2, Projection2, DataType2=GetGeoInfo(alignraster)
if Projection1.ExportToMICoordSys()==Projection2.ExportToMICoordSys():
blocksize=(np.round(GeoT2[1]/GeoT1[1]),np.round(GeoT2[-1]/GeoT1[-1]))
mraster=gdalnumeric.LoadFile(raster)
mraster=np.ma.masked_array(mraster, mask=mraster==NDV1, fill_value=NDV1)
mmin=mraster.min()
mraster=block_reduce(mraster,blocksize,func=how)
araster=gdalnumeric.LoadFile(alignraster)
araster=np.ma.masked_array(araster, mask=araster==NDV2, fill_value=NDV2)
amin=araster.min()
if GeoT1[0]<=GeoT2[0]:
mcol,row3=map_pixel(GeoT2[0], GeoT2[3], GeoT1[1] *blocksize[0],GeoT1[-1]*blocksize[1], GeoT1[0], GeoT1[3])
acol=0
else:
acol,row3=map_pixel(GeoT1[0], GeoT1[3], GeoT2[1],GeoT2[-1], GeoT2[0], GeoT2[3])
mcol=0
if GeoT1[3]<=GeoT2[3]:
col3,arow=map_pixel(GeoT1[0], GeoT1[3], GeoT2[1],GeoT2[-1], GeoT2[0], GeoT2[3])
mrow=0
else:
col3,mrow=map_pixel(GeoT2[0], GeoT2[3], GeoT1[1] *blocksize[0],GeoT1[-1]*blocksize[1], GeoT1[0], GeoT1[3])
arow=0
'''
col3,row3=map_pixel(GeoT1[0], GeoT1[3], GeoT2[1],GeoT2[-1], GeoT2[0], GeoT2[3])
col3=max(0,col3)
row3=max(0,row3)
araster=araster[row3:,col3:]
col3,row3=map_pixel(GeoT2[0], GeoT2[3], GeoT1[1] *blocksize[0],GeoT1[-1]*blocksize[1], GeoT1[0], GeoT1[3])
col3=max(0,abs(col3))
row3=max(0,np.abs(row3))
mraster=mraster[row3:,col3:]
'''
mraster=mraster[mrow:,mcol:]
araster=araster[arow:,acol:]
if cxsize and cysize:
araster=araster[:cysize,:cxsize]
mraster=mraster[:cysize,:cxsize]
else:
rows = min(araster.shape[0],mraster.shape[0])
cols = min(araster.shape[1],mraster.shape[1])
araster=araster[:rows,:cols]
mraster=mraster[:rows,:cols]
#mraster=mraster[row3:rows+row3,col3:cols+col3]
if masked:
mraster=np.ma.masked_array(mraster,mask=mraster<mmin, fill_value=NDV1)
araster=np.ma.masked_array(araster,mask=araster<amin, fill_value=NDV2)
GeoT=(max(GeoT1[0],GeoT2[0]), GeoT1[1]*blocksize[0], GeoT1[2], min(GeoT1[3],GeoT2[3]), GeoT1[4] ,GeoT1[-1]*blocksize[1])
return (mraster,araster,GeoT)
else:
print "Rasters need to be in same projection"
return (-1,-1,-1)
# Load GeoTif raster data
def load_tiff(file):
"""
Load a GeoTiff raster keeping NDV values using a masked array
Usage:
data=LoadTiffRaster(file)
"""
NDV, xsize, ysize, GeoT, Projection, DataType=GetGeoInfo(file)
data=gdalnumeric.LoadFile(file)
data=np.ma.masked_array(data, mask=data==NDV,fill_value=-np.inf)
return data
| gpl-3.0 |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/core/plot/memory.py | 1 | 7242 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.plot.memory Contains the MemoryPlotter class, used for creating plots of the memory consumption
# of a SKIRT simulation as a function of time.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
# Import the relevant PTS classes and modules
from ..basics.map import Map
from .plotter import Plotter
from ..tools.logging import log
from ..tools import filesystem as fs
# -----------------------------------------------------------------
class MemoryPlotter(Plotter):
"""
This class ...
"""
def __init__(self):
"""
The constructor ...
:return:
"""
# Call the constructor of the base class
super(MemoryPlotter, self).__init__()
# -- Attributes --
# A data structure to store the memory (de)allocation information
self.allocation = None
# -----------------------------------------------------------------
@staticmethod
def default_input():
"""
This function ...
:return:
"""
return "memory.dat"
# -----------------------------------------------------------------
def prepare_data(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Preparing the input data into plottable format...")
# Get the number of processes
ranks = np.unique(self.table["Process rank"])
assert len(ranks) == max(ranks) + 1
processes = len(ranks)
# Initialize the data structure to contain the memory usage information in plottable format
self.data = [Map({"times": [], "memory": []}) for i in range(processes)]
# Loop over the different entries in the memory table
for i in range(len(self.table)):
# Get the process rank
rank = self.table["Process rank"][i]
# Get the time and memory usage
time = self.table["Simulation time"][i]
memory = self.table["Memory usage"][i]
# Add the data point to the data structure
self.data[rank].times.append(time)
self.data[rank].memory.append(memory)
# Check whether (de)allocation information is present in the memory table
if "Array (de)allocation" in self.table.colnames:
# Initialize the data structure for plotting the memory usage of the root process and the memory
# allocation curve
self.allocation = Map({"times": [], "allocation": [], "cumulative": []})
# Get the mask covering entries that do not contain array (de)allocation information
mask = self.table["Array (de)allocation"].mask
# Check whether the first entry of the table corresponds to the root process
assert self.table["Process rank"][0] == 0
# Create a variable to store the cumulative sum of allocated memory
cumulative_sum = 0.0
# Loop over the different entries in the memory table
for i in range(len(self.table)):
# Get the process rank
rank = self.table["Process rank"][i]
# Only add the contributions from the root process
if rank > 0: break
# If the entry is masked because it does not contain memory allocation information, skip it
if mask[i]: continue
# Get the time and the amount of (de)allocated memory
time = self.table["Simulation time"][i]
allocation = self.table["Array (de)allocation"][i]
# Add the allocated memory to the sum
cumulative_sum += allocation
# Add the data point to the data structure
self.allocation.times.append(time)
self.allocation.allocation.append(allocation)
self.allocation.cumulative.append(cumulative_sum)
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Making the plots...")
# Make a plot of the memory usage as a function of time
self.plot_memory()
# Make a plot of the memory (de)allocation information, if present
if self.allocation is not None: self.plot_allocation()
# -----------------------------------------------------------------
def plot_memory(self):
"""
This function ...
:return:
"""
# Determine the path to the plot file
plot_path = fs.join(self.output_path, "memory.pdf")
# Initialize figure
plt.figure()
plt.clf()
# Loop over the different processes
for rank in range(len(self.data)):
# Name of the current process
process = "P" + str(rank)
# Plot the memory usage
plt.plot(self.data[rank].times, self.data[rank].memory, label=process)
# Set the axis labels
plt.xlabel("Time (s)", fontsize='large')
plt.ylabel("Memory usage (GB)", fontsize='large')
# Set the plot title
plt.title("Memory consumption")
# Set the legend
if len(self.data) > 16: plt.legend(loc='upper center', ncol=8, bbox_to_anchor=(0.5, -0.1), prop={'size': 8})
else: plt.legend(loc='lower right', ncol=4, prop={'size': 8})
# Save the figure
plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
def plot_allocation(self):
"""
This function ...
:return:
"""
# Determine the path to the plot file
plot_path = fs.join(self.output_path, "allocation.pdf")
# Initialize figure
plt.figure()
plt.clf()
# Plot the memory usage of the root process
plt.plot(self.data[0].times, self.data[0].memory, label="total memory usage")
# Plot the memory allocation of the root process
plt.step(self.allocation.times, self.allocation.cumulative, where="post", linestyle="--", label="allocated array memory")
# Set the axis labels
plt.xlabel("Time (s)", fontsize='large')
plt.ylabel("Memory usage (GB)", fontsize='large')
# Set the plot title
plt.title("Memory (de)allocation")
# Set the legend
plt.legend(loc='lower right', prop={'size': 8})
# Save the figure
plt.savefig(plot_path, bbox_inches='tight', pad_inches=0.25)
plt.close()
# -----------------------------------------------------------------
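# Minimal illustration (added sketch, not part of the PTS plotter API): the
# time-vs-memory curve that plot_memory() draws for one process rank, here on
# purely synthetic numbers so the snippet can run without a SKIRT simulation.
if __name__ == "__main__":
    fake_times = np.linspace(0.0, 100.0, 200)                            # seconds (synthetic)
    fake_memory = 0.5 + 0.01 * fake_times + 0.05 * np.random.rand(200)   # GB (synthetic)
    plt.figure()
    plt.plot(fake_times, fake_memory, label="P0")
    plt.xlabel("Time (s)", fontsize='large')
    plt.ylabel("Memory usage (GB)", fontsize='large')
    plt.title("Memory consumption (synthetic example)")
    plt.legend(loc='lower right', prop={'size': 8})
    plt.show()
# -----------------------------------------------------------------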
| mit |
bigdig/vnpy | account/backtest.py | 1 | 6440 | # encoding: utf-8
'''
Multi-strategy portfolio backtest
'''
import sys
sys.path.append('../../')
from vnpy.app.cta_strategy.strategies.strategyMulti import MultiStrategy
import argparse
import pandas as pd
import numpy as np
from datetime import datetime
from setup_logger import setup_logger
setup_logger(filename='logsBackTest/vnpy_{0}.log'.format(datetime.now().strftime('%m%d_%H%M')), debug=False)
from vnpy.app.cta_strategy.backtesting import BacktestingEngine, OptimizationSetting
from vnpy.app.cta_strategy.backtestingPatch import BacktestingEnginePatch
from datetime import datetime,date,timedelta
import time
import json
import traceback
########################################################################
'''
backtesting
'''
def backtesting(settingFile, kLineCycle = 30, vt_symbol = 'rb1801', vt_symbol2 = None, mode = 'B', startDate = None, days = 1, historyDays = 0, optimization = False):
# Create the backtesting engine
engine = BacktestingEnginePatch()
# Set the start date of the data used for the backtest
if startDate:
startDate = startDate
endDate = datetime.strptime(startDate, '%Y%m%d') + timedelta(days)
else:
startDate = date.today() - timedelta(days + historyDays)
endDate = date.today()
engine.set_parameters(
vt_symbol=vt_symbol,
interval="1m",
start= startDate,
end=endDate,
rate=1/10000,
slippage=1,
size=10,
pricetick=1,
capital=1_000_000,
)
setting = {}
setting['vt_symbol'] = vt_symbol
setting['kLineCycle'] = kLineCycle
setting['settingFile'] = settingFile
engine.add_strategy(MultiStrategy, setting)
engine.load_data()
engine.run_backtesting()
df = engine.calculate_result()
engine.calculate_statistics()
#engine.show_chart()
# Display the backtest results
resultList = engine.showBacktestingResult()
# try:
# engine.showDailyResult()
# except:
# print ('-' * 20)
# print ('Failed to showDailyResult')
# #traceback.print_exc()
# pass
try:
# Display order information
import pandas as pd
orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
try:
orders['holdTime'] = (orders.exitDt - orders.entryDt).astype('timedelta64[m]')
except:
pass
pd.options.display.max_rows = 100
pd.options.display.width = 300
pd.options.display.precision = 2
engine.output ('-' * 50)
engine.output(str(orders))
except:
print ('-' * 20)
print ('Failed to print result')
#traceback.print_exc()
try:
# Display detailed information
import pandas as pd
from utils import plot_candles, plot_candles1
import talib
import numpy as np
# analysis
#engine.loadHistoryData()
orders = pd.DataFrame([i.__dict__ for i in resultList['resultList']])
pricing = pd.DataFrame([i.__dict__ for i in engine.history_data])
#VPIN analysis
# from .VPINAnalysis import VPINAnalysis
# if len(pricing.index) > 1000:
# VPINAnalysis(pricing)
atr = talib.ATR(pricing.high_price.values, pricing.low_price.values, pricing.close_price.values, 25)
atr_ma = pd.DataFrame(atr).rolling(25).mean()[0].values
technicals = {
'rsi': talib.RSI(pricing.close_price.values, 4),
'atr': atr,
'atr-ma': atr_ma
}
technicals = {}
plot_candles1(pricing, volume_bars=True, orders=orders, technicals=technicals)
except:
print ('-' * 20)
print ('Failed to plot candles')
traceback.print_exc()
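# Example invocation (illustration only; the setting file and contract symbol
# below are placeholders that must exist in your own vnpy environment):
# backtesting(settingFile='CTA_setting_multi.json', kLineCycle=30,
#             vt_symbol='rb1801', mode='B', days=30, historyDays=10)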
def main(argv):
# setup the argument parser
arg_parser = argparse.ArgumentParser(description='backtest')
arg_parser.add_argument('-m', '--mode',
required=False,
default='B',
help="set backtest mode(B or T)")
arg_parser.add_argument('-d', '--days',
required=False,
default=1,
type = int,
help="set backtest days")
arg_parser.add_argument('-sd', '--startDate',
required=False,
default='',
help="set backtest days")
arg_parser.add_argument('-s', '--vt_symbol',
required=False,
default='rb1801',
help="set backtest vt_symbol")
arg_parser.add_argument('-s2', '--vt_symbol2',
required=False,
default='',
help="set spread vt_symbol2")
arg_parser.add_argument('-hd', '--historyDays',
required=False,
default=0,
type = int,
help="set history days")
arg_parser.add_argument('-sf', '--settingFile',
required=False,
default='CTA_setting_multi.json',
help="setting file name")
arg_parser.add_argument('-o', '--optimization',
required=False,
default=False,
type = bool,
help="parameter optimization")
arg_parser.add_argument('-yappi', '--yappi',
required=False,
default=False,
type = bool,
help="yappi status")
# parse arguments
cmd = arg_parser.parse_args(argv)
if cmd.yappi:
import yappi
yappi.set_clock_type("cpu")
yappi.start()
backtesting(settingFile = cmd.settingFile, startDate = cmd.startDate, days = cmd.days, mode = cmd.mode,vt_symbol = cmd.vt_symbol, vt_symbol2 = cmd.vt_symbol2, historyDays = cmd.historyDays , optimization = cmd.optimization)
if cmd.yappi:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
if __name__ == "__main__":
main(sys.argv[1:])
#main("-d 1 -s rb1905 -hd 0 -sf CTA_setting_Spread.json -s2 rb1910 -m T".split())
#main('-d 240 -s rb000.SHFE -sf CTA_setting_alpha_real_rb.json'.split())
| mit |
paulray/NICERsoft | nicer/fillgaps.py | 1 | 23795 | """
A script to fill data gaps in a way that retains the noise characteristic of the live data set and preserves AVAR curves. An implementation of the algorithm described in:
Howe and Schlossberger, "Strategy for Characterizing Frequency Stability Measurements having Multiple Dead Times"
"""
import sys
import argparse
import numpy as np
import copy
from scipy.stats import median_abs_deviation
import csv
import matplotlib.pyplot as plt
def check_right(data, gaps, curgap, curgap_size, curgap_num, gap_total):
"""A method to check the right-hand side of the data to obtain the correct number of points to fill the gap.
Parameters:
data - np.array, the data being imputed
gaps - dict, stores the gap indices with their respective number
curgap - tuple, (start, end) indices for current gap
curgap_size - int, size of current gap
curgap_num - int, number of current gap
gap_total - int, total number of gaps
"""
if (
len(data) - 1 - curgap[1]
) >= curgap_size: # if enough data to the right of the gap
if curgap_num == gap_total: # if on the last gap
pts_to_reflect = copy.deepcopy(
data[(curgap[1] + 1) : (curgap[1] + 1 + curgap_size)]
) # take the next number of data points required to fill up the gap
return pts_to_reflect, "right"
elif (gaps[curgap_num + 1][0] - 1) - (
curgap[1] + 1
) >= curgap_size: # if continuous run between this gap and the next has enough points to fill the gap
pts_to_reflect = copy.deepcopy(
data[(curgap[1] + 1) : (curgap[1] + 1 + curgap_size)]
) # take the next number of data points required to fill up the gap
return pts_to_reflect, "right"
else:
# not enough data points between this gap and the next, either print error message and exit or impute from left and right sides
pts_to_reflect = check_both_sides(
data, gaps, curgap, curgap_size, curgap_num, gap_total
)
if pts_to_reflect is None:
return None, None
else:
return pts_to_reflect, "both"
else:
pts_to_reflect = check_both_sides(
data, gaps, curgap, curgap_size, curgap_num, gap_total
)
if pts_to_reflect is None:
return None, None
else:
return pts_to_reflect, "both"
def check_both_sides(data, gaps, curgap, curgap_size, curgap_num, gap_total):
"""A method to check both sides of the data to obtain the correct number of points to fill the gap (half left, half right).
Parameters:
data - np.array, the data being imputed
gaps - dict, stores the gap indices with their respective number
curgap - tuple, (start, end) indices for current gap
curgap_size - int, size of current gap
curgap_num - int, number of current gap
gap_total - int, total number of gaps
"""
# either print error message and exit or impute from left and right sides
if curgap_num == 1: # if this is the first gap in the dataset
if (
(gap_total == 1)
and (curgap[0] >= curgap_size / 2)
and (curgap[1] <= len(data) - (curgap_size / 2))
): # if only gap
pts_to_reflect_left = copy.deepcopy(
data[int(curgap[0] - curgap_size / 2) : (curgap[0])]
)
pts_to_reflect_right = copy.deepcopy(
data[(curgap[1] + 1) : int(curgap[1] + 1 + curgap_size / 2) + 1]
)
return pts_to_reflect_left, pts_to_reflect_right
elif (
(gap_total != 1)
and (curgap[0] >= curgap_size / 2)
and ((gaps[curgap_num + 1][0] - 1) - (curgap[1] + 1) >= curgap_size / 2)
): # if enough space between this gap and the next
pts_to_reflect_left = copy.deepcopy(
data[int(curgap[0] - curgap_size / 2) : (curgap[0])]
)
pts_to_reflect_right = copy.deepcopy(
data[(curgap[1] + 1) : int(curgap[1] + 1 + curgap_size / 2) + 1]
)
return pts_to_reflect_left, pts_to_reflect_right
else: # not enough space on one/both sides
if gap_total == 1:
print("Unable to fill all gaps, not enough data. Exiting...")
return "Done"
else:
return None
elif (
curgap[0] >= gaps[curgap_num - 1][1] + curgap_size / 2
): # if enough space between previous gap and current gap
if (curgap[1] <= len(data) - (curgap_size / 2)) and (
curgap_num == gap_total
): # if last gap and enough space to the right
pts_to_reflect_left = copy.deepcopy(
data[int(curgap[0] - curgap_size / 2) : (curgap[0])]
)
pts_to_reflect_right = copy.deepcopy(
data[(curgap[1] + 1) : int(curgap[1] + 1 + curgap_size / 2) + 1]
)
return pts_to_reflect_left, pts_to_reflect_right
elif (curgap_num != gap_total) and (
(gaps[curgap_num + 1][0] - 1) - (curgap[1] + 1) >= curgap_size / 2
): # if enough space before next gap
pts_to_reflect_left = copy.deepcopy(
data[int(curgap[0] - curgap_size / 2) : (curgap[0])]
)
pts_to_reflect_right = copy.deepcopy(
data[(curgap[1] + 1) : int(curgap[1] + 1 + curgap_size / 2) + 1]
)
return pts_to_reflect_left, pts_to_reflect_right
else: # not enough space on one/both sides
if gap_total == 1:
print("Unable to fill all gaps, not enough data. Exiting...")
return "Done"
else:
return None
else: # not enough space to the left of the gap
if gap_total == 1:
print("Unable to fill all gaps, not enough data. Exiting...")
return "Done"
else:
return None
def check_left(data, gaps, curgap, curgap_size, curgap_num, gap_total):
"""A method to check the left-hand side of the data to obtain the correct number of points to fill the gap.
Parameters:
data - np.array, the data being imputed
gaps - dict, stores the gap indices with their respective number
curgap - tuple, (start, end) indices for current gap
curgap_size - int, size of current gap
curgap_num - int, number of current gap
gap_total - int, total number of gaps
"""
if curgap_num == 1: # if this is the first gap
pts_to_reflect = copy.deepcopy(data[(curgap[0] - curgap_size) : (curgap[0])])
return pts_to_reflect, "left"
elif (curgap[0] - 1) - (
gaps[curgap_num - 1][1] + 1
) >= curgap_size: # if data run between previous gap and current gap large enough
pts_to_reflect = copy.deepcopy(data[(curgap[0] - curgap_size) : (curgap[0])])
return pts_to_reflect, "left"
else: # if data run between previous gap and current gap not large enough, check data set to the right
pts_to_reflect = check_right(
data, gaps, curgap, curgap_size, curgap_num, gap_total
)
return pts_to_reflect
def fill(data, gaps, gap_total, gap_num, reverse=False):
"""A method to fill the gap_num-th gap in the data by reflecting and inverting data on one/both sides of the gap.
Parameters:
data - np.array, the data being imputed
gaps - dict, stores the gap indices with their respective number
gap_total - int, total number of gaps
gap_num - int, number of current gap
reverse - bool, indicates whether the dataset is forward-oriented (gap being filled to the right of the longest continuous data run) or reversed (left of the longest run)
"""
# find size of gap
curgap = gaps[gap_num]
size = (curgap[1] + 1) - curgap[0]
side = None
# check if there is enough data previous to this gap
if reverse:
if curgap[1] > len(data) - size:
# if not enough previous data (right-hand since flipped), check data following gap
pts_to_reflect, side = check_left(
data, gaps, curgap, size, gap_num, gap_total
)
if pts_to_reflect is None:
# decrement gap_num - will come back to this gap once rest of left-sided gaps filled
gap_num = gap_num - 1
elif type(pts_to_reflect) == tuple:
left_pts_to_reflect = pts_to_reflect[0]
right_pts_to_reflect = pts_to_reflect[1]
elif pts_to_reflect == "Done":
return None
else:
pts_to_reflect, side = check_right(
data, gaps, curgap, size, gap_num, gap_total
)
if pts_to_reflect is None:
# decrement gap_num - will come back to this gap once rest of left-sided gaps filled
gap_num = gap_num - 1
elif type(pts_to_reflect) == tuple:
left_pts_to_reflect = pts_to_reflect[0]
right_pts_to_reflect = pts_to_reflect[1]
elif pts_to_reflect == "Done":
return None
else:
if curgap[0] < size:
# if not enough previous data, check data following gap
pts_to_reflect, side = check_right(
data, gaps, curgap, size, gap_num, gap_total
)
if pts_to_reflect is None:
# increment gap_to_impute - will come back to this gap once rest of right-sided gaps filled
gap_num = gap_num + 1
elif type(pts_to_reflect) == tuple:
left_pts_to_reflect = pts_to_reflect[0]
right_pts_to_reflect = pts_to_reflect[1]
else:
pts_to_reflect, side = check_left(
data, gaps, curgap, size, gap_num, gap_total
)
if pts_to_reflect is None:
# increment gap_to_impute - will come back to this gap once rest of right-sided gaps filled
gap_num = gap_num + 1
elif type(pts_to_reflect) == tuple:
left_pts_to_reflect = pts_to_reflect[0]
right_pts_to_reflect = pts_to_reflect[1]
# impute the gap
if pts_to_reflect is not None:
if type(pts_to_reflect) == tuple:
if len(left_pts_to_reflect) != 1:
for j in range(0, len(left_pts_to_reflect)):
left_pts_to_reflect[j] = (
(left_pts_to_reflect[-1] + left_pts_to_reflect[-2]) / 2
+ left_pts_to_reflect[-1]
- left_pts_to_reflect[j]
) # add the difference between last value and current value to last value to invert
if len(right_pts_to_reflect) != 1:
for j in range(0, len(right_pts_to_reflect)):
right_pts_to_reflect[j] = (
right_pts_to_reflect[0] + right_pts_to_reflect[1]
) / 2 + (right_pts_to_reflect[0] - right_pts_to_reflect[j])
data[curgap[0] : (curgap[0] + 1 + int(size / 2))] = left_pts_to_reflect[
::-1
]
data[(curgap[1] - int(size / 2)) : curgap[1] + 1] = right_pts_to_reflect[
::-1
]
# calculate linear slope to apply to fill pts
m, b = calculate_slope(
data, curgap, gap_num, gaps, gap_total, len(pts_to_reflect)
)
for j in range(0, size):
data[curgap[0] + j] = m * j + data[curgap[0] + j] + b
# if odd number of spaces in gap, average first left and last right values and put in middle gap value
if (size / 2) % 2 == 0.5:
mid_val = (left_pts_to_reflect[0] + right_pts_to_reflect[-1]) / 2
data[curgap[1] - int(size / 2)] = mid_val
if gap_num == gap_total:
gaps.pop(gap_num)
gap_num = gap_num - 1
# remove gap from gap map
for k in range(gap_num, gap_total):
gaps[k] = gaps.pop(k + 1)
gap_total = gap_total - 1
else:
# invert and add linear slope to match gap-fill ends to existing data
if side == "right":
if len(pts_to_reflect) != 1:
for j in range(0, len(pts_to_reflect)):
pts_to_reflect[j] = (
pts_to_reflect[0] + pts_to_reflect[1]
) / 2 + (pts_to_reflect[0] - pts_to_reflect[j])
else:
if len(pts_to_reflect) != 1:
for j in range(0, len(pts_to_reflect)):
pts_to_reflect[j] = (
(pts_to_reflect[-1] + pts_to_reflect[-2]) / 2
+ pts_to_reflect[-1]
- pts_to_reflect[j]
)
# calculate linear slope to apply to fill pts
data[curgap[0] : (curgap[1] + 1)] = pts_to_reflect[::-1]
m, b = calculate_slope(
data, curgap, gap_num, gaps, gap_total, len(pts_to_reflect)
)
for j in range(0, size):
data[curgap[0] + j] = m * j + data[curgap[0] + j] + b
if gap_num == gap_total:
gaps.pop(gap_num)
gap_num = gap_num - 1
else:
for k in range(gap_num, gap_total):
gaps[k] = gaps.pop(k + 1)
gap_total = gap_total - 1
return gap_num, gap_total
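def _reflect_invert_demo():
    """Added illustration, not called by fillgaps: the reflect-and-invert
    arithmetic that fill() applies to pts_to_reflect when imputing from the
    left-hand side, shown on a small synthetic run of points."""
    pts = np.array([1.0, 2.0, 4.0, 7.0])
    pivot = (pts[-1] + pts[-2]) / 2 + pts[-1]  # pivot just beyond the last point
    inverted = pivot - pts                     # invert about the pivot
    return inverted[::-1]                      # reflect so the fill joins smoothly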
def calculate_slope(data, curgap, curgap_num, gaps, gap_total, fill_pts_len):
"""A method to calculate a linear slope to add to the fill points for the gap and ensure 2 end-point matching when possible.
Parameters:
data - np.ndarray, the data being imputed
curgap - tuple, (start, end) indices for current gap
curgap_num - int, number of current gap
gaps - dict, stores the gap indices with their respective number
gap_total - int, total number of gaps
fill_pts_len - int, the length of the data to fill the gap
"""
m = 0
b = 0
if fill_pts_len > 1:
# check for outliers at the boundary and skip over as necessary
start, end = check_boundaries(curgap, curgap_num, gaps, gap_total, data)
if (curgap_num != 1 and curgap[0] - gaps[curgap_num - 1][1] > 2) or (
curgap_num == 1 and curgap[0] >= 2
):
if (
curgap_num != gap_total and gaps[curgap_num + 1][0] - curgap[1] > 2
) or (curgap_num == gap_total and len(data) - curgap[1] > 2):
# 2-endpoint average matching
m = (
(data[end[0]] + data[end[1]]) / 2
- (data[start[1]] + data[start[0]]) / 2
) / (fill_pts_len + 4)
b = (data[start[1]] + data[start[0]]) / 2 - data[curgap[0]]
elif curgap_num != gap_total or len(data) - curgap[1] == 2:
# not 2 data points to match with on right side - endpoint matching w/ 2-pt average
m = (data[end[0]] - (data[start[1]] + data[start[0]]) / 2) / (
fill_pts_len + 3
)
b = (data[start[1]] + data[start[0]]) / 2 - data[curgap[0]]
else: # gap is at end of data
m = 0
b = (data[start[1]] + data[start[0]]) / 2 - data[curgap[0]]
elif curgap_num != 1 or curgap[0] == 1:
if (
curgap_num != gap_total and gaps[curgap_num + 1][0] - curgap[1] > 2
) or (curgap_num == gap_total and len(data) - curgap[1] > 2):
# not 2 data points to match with on left side - endpoint matching w/ 2-pt average
m = ((data[end[0]] + data[end[1]]) / 2 - data[start[1]]) / (
fill_pts_len + 2
)
b = data[start[1]] - data[curgap[0]]
elif curgap_num != gap_total or len(data) - curgap[1] == 2:
# not 2 data points to match with on either side - endpoint matching
m = (data[end[0]] - data[start[1]]) / (fill_pts_len + 2)
b = data[start[1]] - data[curgap[0]]
else: # gap is at end of data
m = 0
b = 0
else:
m = 0
b = 0
return m, b
def check_boundaries(curgap, curgap_num, gaps, gap_total, data):
"""Look for outliers at the current gap's boundaries.
Parameters:
curgap - tuple, start and end indices of the current gap
curgap_num - int, index of current gap
gaps - dict, stores all gaps in dataset
gap_total - int, total number of gaps
data - np.ndarray, the data being imputed
"""
start_int = None
start_ext = None
end_int = None
end_ext = None
if (curgap_num != 1 and curgap[0] - gaps[curgap_num - 1][1] < 11) or curgap[0] < 11:
start_ext = curgap[0] - 2
start_int = curgap[0] - 1
else:
data_diff = []
for i in range(curgap[0] - 10, curgap[0] - 1):
data_diff.append(data[i + 1] - data[i])
mad = median_abs_deviation(data_diff)
start_ext = curgap[0] - 2
start_int = curgap[0] - 1
cutoff = 5 * mad
while abs(data[start_ext]) > cutoff and abs(data[start_int]) > cutoff:
start_ext -= 1
start_int -= 1
if (curgap_num != gap_total and gaps[curgap_num + 1][0] - curgap[1] < 11) or len(
data
) - curgap[1] < 11:
end_int = curgap[1] + 1
end_ext = curgap[1] + 2
else:
data_diff = []
for i in range(curgap[1] + 1, curgap[1] + 11):
data_diff.append(data[i + 1] - data[i])
mad = median_abs_deviation(data_diff)
end_int = curgap[1] + 1
end_ext = curgap[1] + 2
cutoff = 5 * mad
while abs(data[end_int]) > cutoff and abs(data[end_ext]) > cutoff:
end_int += 1
end_ext += 1
if start_int < 0:
start_int = None
if start_ext < 0:
start_ext = None
if end_int > len(data) - 1:
end_int = None
if end_ext > len(data) - 1:
end_ext = None
start = (start_ext, start_int)
end = (end_int, end_ext)
return start, end
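def _mad_cutoff_demo():
    """Added illustration, not called by fillgaps: the 5*MAD rule that
    check_boundaries() uses to flag outliers next to a gap, on synthetic
    point-to-point differences (the last one is an obvious outlier)."""
    diffs = np.array([0.1, -0.2, 0.05, 0.15, -0.1, 3.0])
    cutoff = 5 * median_abs_deviation(diffs)
    return np.abs(diffs) > cutoff  # boolean mask of outlying differences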
def fillgaps(datafile, method):
"""A method to fill gaps in data and preserve data noise characteristics.
Parameters:
datafile: a csv file formatted as follows: MJD, residual (us)
method: a string indicating which of the 4 methods of data gap filling to use (reflect, reflect_invert, replica, replica_endpoint); only "reflect+invert" is implemented below
"""
y = []
x = np.array([])
with open(datafile, "r") as f:
lines = f.readline()
while lines:
vals = lines.split(",")
x = np.append(x, float(vals[0]))
y.append(float(vals[1]))
lines = f.readline()
xindices = np.array((x - x[0]), dtype=int)
data = np.zeros(xindices.max() + 1) * np.nan
data[xindices] = y
fig, ax = plt.subplots(2, figsize=(12, 6))
ax[0].errorbar(x, y, fmt=".")
ax[0].grid(True)
ax[0].set_xlabel("MJD")
ax[0].set_ylabel("Residuals (us)")
# find indices of data gaps and largest continuous run of data
gap_indeces = np.where(np.isnan(data))[0]
# check if the above numpy.ndarray empty - print message and exit
if gap_indeces.size == 0:
print("The data has no gaps. Exiting...")
return
gap_total = 1
start_gap_inx = gap_indeces[0]
maxdiff = start_gap_inx # run before first gap
gap_to_impute = 1
# initial value
gaps = {
1: (start_gap_inx, start_gap_inx)
} # {gap #: (starting index, ending index)}
for i in range(len(gap_indeces) - 1):
diff = gap_indeces[i + 1] - gap_indeces[i]
if diff > maxdiff:
maxdiff = diff
gap_to_impute = (
gap_total + 1
) # gap to the right of the largest run will be the first to impute
if diff > 1: # new gap reached
gaps[gap_total] = (start_gap_inx, gap_indeces[i])
start_gap_inx = gap_indeces[i + 1]
gap_total = gap_total + 1
# check if new gap also at last index (1-index gap)
if i == len(gap_indeces) - 2:
gaps[gap_total] = (start_gap_inx, start_gap_inx)
if i == len(gap_indeces) - 2: # only one jump in data set
gaps[gap_total] = (start_gap_inx, gap_indeces[i + 1])
# impute single-point gaps
i = 1
while i <= gap_total:
# for i in range(1, gap_total):
if gaps[i][0] == gaps[i][1]:
# if data on both sides of gap, take average of previous and following data point
if gaps[i][0] != 0 and gaps[i][0] != len(data) - 1:
data[gaps[i][0]] = (data[gaps[i][0] - 1] + data[gaps[i][0] + 1]) / 2
elif gaps[i][0] == 0:
data[0] = data[1]
else:
data[-1] = data[-2]
for k in range(i, gap_total):
gaps[k] = gaps.pop(k + 1)
gap_total = gap_total - 1
else:
i = i + 1
# check if first gap_to_impute now beyond the range of total gaps
if gap_to_impute > gap_total:
gap_to_impute = 1 # set to first gap - if only 1 gap, automatic choice, otherwise starting point
if gap_total > 1:
maxdiff = gaps[1][0] # run before first gap
for i in range(1, gap_total):
diff = gaps[i + 1][0] - gaps[i][1]
if diff > maxdiff:
maxdiff = diff
gap_to_impute = i + 1
# impute gaps to the right of this run, then reverse and do left (4 methods)
# i. Type 2 - reflect+invert
if method == "reflect+invert":
while gap_total != 0:
while gap_to_impute <= gap_total and gap_to_impute != 0:
gap_to_impute, gap_total = fill(data, gaps, gap_total, gap_to_impute)
if gap_to_impute is None:
return
# spatially reverse dataset, continue until beginning reached
for i in range(gap_to_impute, 0, -1):
gap_to_impute, gap_total = fill(data, gaps, gap_total, i, reverse=True)
if gap_to_impute is None:
return
# reset gap_to_impute if still gaps left
maxdiff = 0
for i in range(1, len(gaps)):
diff = gaps[i + 1][0] - gaps[i][1]
if diff > maxdiff:
gap_to_impute = (
i + 1
) # gap to the right of the largest continuous run
xfilled = np.linspace(
int(x[0]), int(x[-1]), num=int(x[-1] - x[0]) + 1
) # vector of filled-in MJDs, increments of 1 day
with open("result.csv", "w") as f:
csvwriter = csv.writer(f)
csvwriter.writerows(
zip(xfilled, data)
) # write results into csv file in same format as input
ax[1].errorbar(xfilled, data, fmt=".")
ax[1].grid(True)
ax[1].set_xlabel("MJD")
ax[1].set_ylabel("Residuals (us)")
plt.show() # displays graphs of data before/after imputation
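# Usage sketch (added note): the input is a two-column CSV of "MJD,residual"
# with the residual in microseconds; "residuals.csv" below is a placeholder.
# From Python: fillgaps("residuals.csv", "reflect+invert")
# From the shell (see the __main__ block below):
# python fillgaps.py residuals.csv reflect+invert
# The imputed series is written to result.csv in the same two-column format.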
if __name__ == "__main__":
if len(sys.argv) != 3:
raise ValueError("Need two arguments: data file and imputation method.")
fillgaps(sys.argv[1], sys.argv[2])
| mit |
binghongcha08/pyQMD | QMC/MC_exchange/permute3d/dissipation/1.0/en.py | 15 | 1291 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
#data = np.genfromtxt(fname='/home/bing/dissipation/energy.dat')
data = np.genfromtxt(fname='energy.dat')
fig, (ax1,ax2) = plt.subplots(ncols=1, nrows=2, sharex=True)
#font = {'family' : 'ubuntu',
# 'weight' : 'normal',
# 'size' : '16'}
#mpl.rc('font', **font) # pass in the font dict as kwargs
mpl.rcParams['font.size'] = 12
#mpl.rcParams['figure.figsize'] = 8,6
#pl.title('two-steps fitting alg')
ax1.set_ylabel('Energy [hartree]')
ax1.plot(data[:,0],data[:,2],'b--',linewidth=2,label='Potential')
#pl.plot(dat[:,0],dat[:,2],'r-',linewidth=2)
ax1.plot(data[:,0],data[:,3],'g-.',linewidth=2,label='Quantum Potential')
ax1.plot(data[:,0],data[:,4],'k-',linewidth=2,label='Energy')
#pl.legend(bbox_to_anchor=(0.5, 0.38, 0.42, .302), loc=3,ncol=1, mode="expand", borderaxespad=0.)
#ax1.set_yticks((0.4,0.6,0.8))
ax1.legend(loc=0)
#ax1.set_ylim(0,5)
ax2.set_xlabel('time [a.u.]')
ax2.set_ylabel('Energy [hartree]')
ax2.plot(data[:,0],data[:,1],'r--',linewidth=2,label='$Kinetic$')
#pl.plot(dat[:,0],dat[:,1],'k-',linewidth=2)
ax2.set_yscale('log')
#ax2.set_xticks((0,4,8))
#ax2.set_yticks((1e-7,1e-5,1e-3))
plt.legend(loc=0)
plt.subplots_adjust(hspace=0.)
plt.show()
| gpl-3.0 |
shangwuhencc/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
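# Optional check (added for illustration, not in the original example): report
# how many pixels fall in each of the two segments found above.
seg_ids, seg_sizes = np.unique(labels, return_counts=True)
print("segment sizes:", dict(zip(seg_ids.tolist(), seg_sizes.tolist())))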
| bsd-3-clause |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
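if __name__ == '__main__':
    # Small self-contained sketch (added illustration, not part of the module):
    # use the mixin defined above to map scalar values to RGBA colors. 'jet' is
    # one of the colormap names registered in datad.
    sm = ScalarMappable(norm=colors.Normalize(vmin=0.0, vmax=1.0),
                        cmap=get_cmap('jet'))
    values = np.linspace(0.0, 1.0, 5)
    sm.set_array(values)
    print(sm.to_rgba(values))  # five (r, g, b, a) rows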
| gpl-3.0 |
Djabbz/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while staying the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
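# Numeric summary (added for illustration): count how many of the truly
# relevant features each estimator keeps (non-zero coefficient) in every task,
# using only the coefficient arrays computed above.
lasso_kept = np.all(coef_lasso_[:, :n_relevant_features] != 0, axis=0).sum()
mtl_kept = np.all(coef_multi_task_lasso_[:, :n_relevant_features] != 0, axis=0).sum()
print("features kept across all tasks - Lasso: %d, MultiTaskLasso: %d"
      % (lasso_kept, mtl_kept))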
| bsd-3-clause |
ChristophKirst/ClearMap | ClearMap/Analysis/Statistics.py | 2 | 15460 | # -*- coding: utf-8 -*-
"""
Create some statistics to test significant changes
in voxelized and labeled data
TODO: cleanup / make generic
"""
#:copyright: Copyright 2015 by Christoph Kirst, The Rockefeller University, New York City
#:license: GNU, see LICENSE.txt for details.
import sys
self = sys.modules[__name__];
import numpy
from scipy import stats
import ClearMap.IO as io
import ClearMap.Analysis.Label as lbl
import ClearMap.Analysis.Tools.StatisticalTests as stats2
def readDataGroup(filenames, combine = True, **args):
"""Turn a list of filenames for data into a numpy stack"""
#check if stack already:
if isinstance(filenames, numpy.ndarray):
return filenames;
#read the individual files
group = [];
for f in filenames:
data = io.readData(f, **args);
data = numpy.reshape(data, (1,) + data.shape);
group.append(data);
if combine:
return numpy.vstack(group);
else:
return group;
def readPointsGroup(filenames, **args):
"""Turn a list of filenames for points into a numpy stack"""
#check if stack already:
if isinstance(filenames, numpy.ndarray):
return filenames;
#read the individual files
group = [];
for f in filenames:
data = io.readPoints(f, **args);
#data = numpy.reshape(data, (1,) + data.shape);
group.append(data);
return group
#return numpy.vstack(group);
def tTestVoxelization(group1, group2, signed = False, removeNaN = True, pcutoff = None):
"""t-Test on differences between the individual voxels in group1 and group2, group is a array of voxelizations"""
g1 = self.readDataGroup(group1);
g2 = self.readDataGroup(group2);
tvals, pvals = stats.ttest_ind(g1, g2, axis = 0, equal_var = True);
#remove nans
if removeNaN:
pi = numpy.isnan(pvals);
pvals[pi] = 1.0;
tvals[pi] = 0;
pvals = self.cutoffPValues(pvals, pcutoff = pcutoff);
#return
if signed:
return pvals, numpy.sign(tvals);
else:
return pvals;
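# Illustrative call (added note, not executed here): with two stacks of
# voxelized data of shape (n_samples, x, y, z), e.g. numpy.random.rand arrays
# g1 and g2,
# pvals, psign = tTestVoxelization(g1, g2, signed = True, pcutoff = 0.05)
# gives a per-voxel p-value map clipped at 0.05 plus the sign of the t statistic,
# which colorPValues below can turn into an RGB overlay.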
def cutoffPValues(pvals, pcutoff = 0.05):
if pcutoff is None:
return pvals;
pvals2 = pvals.copy();
pvals2[pvals2 > pcutoff] = pcutoff;
return pvals2;
def colorPValues(pvals, psign, positive = [1,0], negative = [0,1], pcutoff = None, positivetrend = [0,0,1,0], negativetrend = [0,0,0,1], pmax = None):
pvalsinv = pvals.copy();
if pmax is None:
pmax = pvals.max();
pvalsinv = pmax - pvalsinv;
if pcutoff is None: # color given p values
d = len(positive);
ds = pvals.shape + (d,);
pvc = numpy.zeros(ds);
#color
ids = psign > 0;
pvalsi = pvalsinv[ids];
for i in range(d):
pvc[ids, i] = pvalsi * positive[i];
ids = psign < 0;
pvalsi = pvalsinv[ids];
for i in range(d):
pvc[ids, i] = pvalsi * negative[i];
return pvc;
else: # split pvalues according to cutoff
d = len(positivetrend);
if d != len(positive) or d != len(negative) or d != len(negativetrend) :
raise RuntimeError('colorPValues: positive, negative, positivetrend and negativetrend options must be of equal length!');
ds = pvals.shape + (d,);
pvc = numpy.zeros(ds);
idc = pvals < pcutoff;
ids = psign > 0;
##color
# significant positive
ii = numpy.logical_and(ids, idc);
pvalsi = pvalsinv[ii];
w = positive;
for i in range(d):
pvc[ii, i] = pvalsi * w[i];
#non-significant positive
ii = numpy.logical_and(ids, numpy.negative(idc));
pvalsi = pvalsinv[ii];
w = positivetrend;
for i in range(d):
pvc[ii, i] = pvalsi * w[i];
# significant negative
ii = numpy.logical_and(numpy.negative(ids), idc);
pvalsi = pvalsinv[ii];
w = negative;
for i in range(d):
pvc[ii, i] = pvalsi * w[i];
#non-significant negative
ii = numpy.logical_and(numpy.negative(ids), numpy.negative(idc))
pvalsi = pvalsinv[ii];
w = negativetrend;
for i in range(d):
pvc[ii, i] = pvalsi * w[i];
return pvc;
def mean(group, **args):
g = self.readGroup(group, **args);
return g.mean(axis = 0);
def std(group, **args):
g = self.readGroup(group, **args);
return g.std(axis = 0);
def var(group, **args):
g = self.readGroup(group, **args);
return g.var(axis = 0);
def thresholdPoints(points, intensities, threshold = 0, row = 0):
"""Threshold points by intensities"""
points, intensities = io.readPoints((points, intensities));
if not isinstance(threshold, tuple):
threshold = (threshold, all);
if not isinstance(row, tuple):
row = (row, row);
if intensities.ndim > 1:
i = intensities[:,row[0]];
else:
i = intensities;
iids = numpy.ones(i.shape, dtype = 'bool');
if not threshold[0] is all:
iids = numpy.logical_and(iids, i >= threshold[0]);
if intensities.ndim > 1:
i = intensities[:,row[1]];
if not threshold[1] is all:
iids = numpy.logical_and(iids, i <= threshold[1]);
return (points[iids, ...], intensities[iids, ...]);
def weightsFromPrecentiles(intensities, percentiles = [25,50,75,100]):
perc = numpy.percentile(intensities, percentiles);
weights = numpy.zeros(intensities.shape);
for p in perc:
ii = intensities > p;
weights[ii] = weights[ii] + 1;
return weights;
# needs clean up
def countPointsGroupInRegions(pointGroup, labeledImage = lbl.DefaultLabeledImageFile, intensityGroup = None, intensityRow = 0, returnIds = True, returnCounts = False, collapse = None):
"""Generates a table of counts for the various point datasets in pointGroup"""
if intensityGroup is None:
counts = [lbl.countPointsInRegions(pointGroup[i], labeledImage = labeledImage, sort = True, allIds = True, returnIds = False, returnCounts = returnCounts, intensities = None, collapse = collapse) for i in range(len(pointGroup))];
else:
counts = [lbl.countPointsInRegions(pointGroup[i], labeledImage = labeledImage, sort = True, allIds = True, returnIds = False, returnCounts = returnCounts,
intensities = intensityGroup[i], intensityRow = intensityRow, collapse = collapse) for i in range(len(pointGroup))];
if returnCounts and not intensityGroup is None:
countsi = (c[1] for c in counts);
counts = (c[0] for c in counts);
else:
countsi = None;
counts = numpy.vstack((c for c in counts)).T;
if not countsi is None:
countsi = numpy.vstack((c for c in countsi)).T;
if returnIds:
ids = numpy.sort(lbl.Label.ids);
#ids.shape = (1,) + ids.shape;
#return numpy.concatenate((ids.T,counts), axis = 1
if countsi is None:
return ids, counts
else:
return ids, counts, countsi
else:
if countsi is None:
return counts
else:
return counts, countsi
# needs clean up
def tTestPointsInRegions(pointCounts1, pointCounts2, labeledImage = lbl.DefaultLabeledImageFile, signed = False, removeNaN = True, pcutoff = None, equal_var = False):
"""t-Test on differences in counts of points in labeled regions"""
#ids, p1 = countPointsGroupInRegions(pointGroup1, labeledImage = labeledImage, withIds = True);
#p2 = countPointsGroupInRegions(pointGroup2, labeledImage = labeledImage, withIds = False);
tvals, pvals = stats.ttest_ind(pointCounts1, pointCounts2, axis = 1, equal_var = equal_var);
#remove nans
if removeNaN:
pi = numpy.isnan(pvals);
pvals[pi] = 1.0;
tvals[pi] = 0;
pvals = self.cutoffPValues(pvals, pcutoff = pcutoff);
#pvals.shape = (1,) + pvals.shape;
#ids.shape = (1,) + ids.shape;
#pvals = numpy.concatenate((ids.T, pvals.T), axis = 1);
if signed:
return pvals, numpy.sign(tvals);
else:
return pvals;
def testCompletedCumulatives(data, method = 'AndersonDarling', offset = None, plot = False):
"""Test if data sets have the same number / intensity distribution by adding max intensity counts to the smaller sized data sets and performing a distribution comparison test"""
#idea: fill up data points to the same numbers at the high intensity values and use KS test
#cf. work in progress on thoroughly testing the differences in histograms
#fill up the low count data
n = numpy.array([x.size for x in data]);
nm = n.max();
m = numpy.array([x.max() for x in data]);
mm = m.max();
k = n.size;
#print nm, mm, k
if offset is None:
#assume data starts at 0 !
offset = mm / nm; #ideally, for all statistics this should be mm + eps to have as little influence as possible.
datac = [x.copy() for x in data];
for i in range(m.size):
if n[i] < nm:
datac[i] = numpy.concatenate((datac[i], numpy.ones(nm-n[i], dtype = datac[i].dtype) * (mm + offset))); # + 10E-5 * numpy.random.rand(nm-n[i])));
#test by plotting
if plot is True:
import matplotlib.pyplot as plt;
for i in range(m.size):
datac[i].sort();
plt.step(datac[i], numpy.arange(datac[i].size));
#perform the tests
if method == 'KolmogorovSmirnov' or method == 'KS':
if k == 2:
(s, p) = stats.ks_2samp(datac[0], datac[1]);
else:
raise RuntimeError('KolmogorovSmirnov only for 2 samples not %d' % k);
elif method == 'CramervonMises' or method == 'CM':
if k == 2:
(s,p) = stats2.testCramerVonMises2Sample(datac[0], datac[1]);
else:
raise RuntimeError('CramervonMises only for 2 samples not %d' % k);
elif method == 'AndersonDarling' or method == 'AD':
(s,a,p) = stats.anderson_ksamp(datac);
return (p,s);
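# Illustrative call (added note, not executed here): for two 1d intensity
# samples a and b, e.g. a = numpy.random.rand(500) and b = numpy.random.rand(800),
# p, s = testCompletedCumulatives((a, b), method = 'KS')
# pads the shorter sample with values just above the joint maximum so both
# completed cumulatives have equal length before the two-sample test is run.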
def testCompletedInvertedCumulatives(data, method = 'AndersonDarling', offset = None, plot = False):
"""Test if data sets have the same number / intensity distribution by adding zero intensity counts to the smaller sized data sets and performing a distribution comparison test on the reversed cumulative distribution"""
#idea: fill up data points to the same numbers at the high intensity values and use KS test
#cf. work in progress on thoroughly testing the differences in histograms
#fill up the low count data
n = numpy.array([x.size for x in data]);
nm = n.max();
m = numpy.array([x.max() for x in data]);
mm = m.max();
k = n.size;
#print nm, mm, k
if offset is None:
#assume data starts at 0 !
offset = mm / nm; #ideally, for all statistics this should be mm + eps to have as little influence as possible.
datac = [x.copy() for x in data];
for i in range(m.size):
if n[i] < nm:
datac[i] = numpy.concatenate((-datac[i], numpy.ones(nm-n[i], dtype = datac[i].dtype) * (offset))); # + 10E-5 * numpy.random.rand(nm-n[i])));
else:
datac[i] = -datac[i];
#test by plotting
if plot is True:
import matplotlib.pyplot as plt;
for i in range(m.size):
datac[i].sort();
plt.step(datac[i], numpy.arange(datac[i].size));
#perform the tests
if method == 'KolmogorovSmirnov' or method == 'KS':
if k == 2:
(s, p) = stats.ks_2samp(datac[0], datac[1]);
else:
raise RuntimeError('KolmogorovSmirnov only for 2 samples not %d' % k);
elif method == 'CramervonMises' or method == 'CM':
if k == 2:
(s,p) = stats2.testCramerVonMises2Sample(datac[0], datac[1]);
else:
raise RuntimeError('CramervonMises only for 2 samples not %d' % k);
elif method == 'AndersonDarling' or method == 'AD':
(s,a,p) = stats.anderson_ksamp(datac);
return (p,s);
def testCompletedCumulativesInSpheres(points1, intensities1, points2, intensities2, dataSize = lbl.DefaultLabeledImageFile, radius = 100, method = 'AndersonDarling'):
"""Performs completed cumulative distribution tests for each pixel using points in a ball centered at those coordinates, returns 4 arrays: p value, statistic value, number in each group"""
#TODO: simple implementation -> slow -> speed up
dataSize = io.dataSize(dataSize);
if len(dataSize) != 3:
raise RuntimeError('dataSize expected to be 3d');
# distances^2 to origin
x1= points1[:,0]; y1 = points1[:,1]; z1 = points1[:,2]; i1 = intensities1;
d1 = x1 * x1 + y1 * y1 + z1 * z1;
x2 = points2[:,0]; y2 = points2[:,1]; z2 = points2[:,2]; i2 = intensities2;
d2 = x2 * x2 + y2 * y2 + z2 * z2;
r2 = radius * radius; # TODO: inhomogenous in 3d !
p = numpy.zeros(dataSize);
s = numpy.zeros(dataSize);
n1 = numpy.zeros(dataSize, dtype = 'int');
n2 = numpy.zeros(dataSize, dtype = 'int');
for x in range(dataSize[0]):
#print x
for y in range(dataSize[1]):
#print y
for z in range(dataSize[2]):
#print z
d11 = d1 - 2 * (x * x1 + y * y1 + z * z1) + (x*x + y*y + z*z);
d22 = d2 - 2 * (x * x2 + y * y2 + z * z2) + (x*x + y*y + z*z);
ii1 = d11 < r2;
ii2 = d22 < r2;
n1[x,y,z] = ii1.sum();
n2[x,y,z] = ii2.sum();
if n1[x,y,z] > 0 and n2[x,y,z] > 0:
(pp, ss) = self.testCompletedCumulatives((i1[ii1], i2[ii2]), method = method);
else:
pp = 0; ss = 0;
p[x,y,z] = pp;
s[x,y,z] = ss;
return (p,s,n1,n2);
def test():
"""Test the statistics array"""
import ClearMap.Analysis.Statistics as self
reload(self)
import numpy, os
#x = stats.norm.rvs(loc=5,scale=1,size=1500)
#y = stats.norm.rvs(loc=-5,scale=1,size=1500)
s = numpy.ones((5,4,20));
s[:, 0:3, :] = - 1;
x = numpy.random.rand(4,4,20);
y = numpy.random.rand(5,4,20) + s;
# print stats.ttest_ind(x,y, axis = 0, equal_var = False);
pvals, psign = self.tTestVoxelization(x,y, signed = True);
print(pvals)
pvalscol = self.colorPValues(pvals, psign, positive = [255,0,0], negative = [0,255,0])
import ClearMap.Visualization.Plot as plt
plt.plotTiling(pvalscol)
# test points
import ClearMap.Settings as settings
pf = os.path.join(settings.ClearMapPath, 'Test/Synthetic/cells_transformed_to_reference.csv');
pg = (pf,pf);
pc = self.countPointsGroupInRegions(pg);
pvals, tvals = self.tTestPointsInRegions(pg, pg, signed = True);
if __name__ == "__main__":
self.test();
| gpl-3.0 |
btallman/incubator-airflow | docs/conf.py | 33 | 8957 | # -*- coding: utf-8 -*-
#
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import mock
MOCK_MODULES = [
'apiclient',
'apiclient.discovery',
'apiclient.http',
'mesos',
'mesos.interface',
'mesos.native',
'oauth2client.service_account',
'pandas.io.gbq',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
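# The modules mocked above are heavy or optional dependencies that may be
# absent when the docs are built; substituting mock.Mock() lets autodoc import
# Airflow modules that reference them without installing the real packages.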
# Hack to allow changing for piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of the utils.apply_default that was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
from airflow import settings
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
viewcode_import = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Airflow'
#copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '1.0.0'
# The full version, including alpha/beta/rc tags.
#release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Airflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Airflow.tex', u'Airflow Documentation',
u'Maxime Beauchemin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'airflow', u'Airflow Documentation',
[u'Maxime Beauchemin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'Airflow', u'Airflow Documentation',
u'Maxime Beauchemin', 'Airflow',
'Airflow is a system to programmatically author, schedule and monitor data pipelines.',
'Miscellaneous'
),]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
liang42hao/bokeh | bokeh/charts/tests/test_data_adapter.py | 37 | 3285 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import DataAdapter
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDataAdapter(unittest.TestCase):
def setUp(self):
self._values = OrderedDict()
self._values['first'] = [2., 5., 3.]
self._values['second'] = [4., 1., 4.]
self._values['third'] = [6., 4., 3.]
def test_list(self):
values = list(self._values.values())
da = DataAdapter(values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_array(self):
values = np.array(list(self._values.values()))
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['0', '1', '2'])
self.assertEqual(da.keys(), ['0', '1', '2'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_pandas(self):
values = pd.DataFrame(self._values)
da = DataAdapter(values)
# TODO: THIS SHOULD BE FIXED..
#self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
# We expect the data adapter index to be the same as that of the underlying
# pandas object, and not the default created by DataAdapter
self.assertEqual(da.index, [0, 1, 2])
def test_ordered_dict(self):
da = DataAdapter(self._values)
self.assertEqual(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, ['a', 'b', 'c'])
def test_blaze_data_no_fields(self):
import blaze
valuesdf = pd.DataFrame(self._values)
values = blaze.Data(valuesdf)
da = DataAdapter(values)
assert_array_equal(da.values(), list(self._values.values()))
self.assertEqual(da.columns, ['first', 'second', 'third'])
self.assertEqual(da.keys(), ['first', 'second', 'third'])
self.assertEqual(da.index, [0, 1, 2])
xs, _values = DataAdapter.get_index_and_data(values, None)
assert_array_equal([0,1,2], xs)
| bsd-3-clause |
BrownDwarf/Starfish | attic/synthphot.py | 2 | 3197 | #!/usr/bin/env python2
import pysynphot as S
import pyfits as pf
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
from PHOENIX_tools import load_flux_full, w_full
from deredden import deredden
c_ang = 2.99792458e18 #A s^-1
#data for GWOri
#wl [microns], f_nu [Jy], sys_err (as fraction of f_nu)
data = np.array([[0.36, 6.482e-02, 0.12],
[0.44, 2.451e-01, 0.30],
[0.55, 4.103e-01, 0.08],
[0.64, 7.844e-01, 0.11],
[0.79, 9.174e-01, 0.07]])
wl = data[:, 0] * 1e4 #angstroms
f_nu = data[:, 1] * 1e-23 #ergs/cm^2/s/Hz
f_nu_err = f_nu * data[:, 2] #ergs/cm^2/s/Hz
#Convert to f_lambda
f_lam = f_nu * c_ang / wl ** 2
f_lam_err = f_nu_err * c_ang / wl ** 2
filters = ["U", "B", "V", "R", "I"]
ind = (w_full > 2000) & (w_full < 40000)
ww = w_full[ind]
ff = load_flux_full(5900, 3.5)[ind] * 2e-28
#redden spectrum
#red = ff/deredden(ww,1.5,mags=False)
sp = S.ArraySpectrum(wave=ww, flux=ff, waveunits='angstrom', fluxunits='flam', name='T5900K')
#sdss_u=S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/sdss_u_005_syn.fits")
#sdss_g=S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/sdss_g_005_syn.fits")
#sdss_r=S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/sdss_r_005_syn.fits")
#sdss_i=S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/sdss_i_005_syn.fits")
#sdss_z=S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/sdss_z_005_syn.fits")
U = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/landolt_u_004_syn.fits")
B = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/landolt_b_004_syn.fits")
V = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/landolt_v_004_syn.fits")
R = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/landolt_r_004_syn.fits")
I = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/landolt_i_004_syn.fits")
#H = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/bessell_h_004_syn.fits")
#J = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/bessell_j_003_syn.fits")
#K = S.FileBandpass("/home/ian/.builds/stsci_python-2.12/pysynphot/cdbs/comp/nonhst/bessell_k_003_syn.fits")
#obs = S.Observation(sp,sdss_u)
#print("Filter = {name}\tAB = {AB:.3f}\tVega = {Vega:.3f}".format(name="i",AB=obs.effstim("abmag"),Vega=obs.effstim(
# "vegamag")))
pfilts = [U, B, V, R, I]
def calc_fluxes():
return np.array([S.Observation(sp, i).effstim("flam") for i in pfilts])
#fluxes = calc_fluxes()
def plot_SED():
fig = plt.figure()
ax = fig.add_subplot(111)
ax.errorbar(wl, f_lam, yerr=f_lam_err, ls="", fmt="o")
ax.plot(wl, fluxes, "o")
ax.set_xlabel(r"$\lambda$ [\AA]")
ax.set_ylabel(r"$F_\lambda$ $\left [\frac{{\rm erg}}{{\rm s} \cdot {\rm cm}^2 \cdot {\rm \AA}} \right ]$")
plt.show()
def main():
print(calc_fluxes())
#plot_SED()
if __name__ == "__main__":
main()
| bsd-3-clause |
kashif/scikit-learn | sklearn/linear_model/sag.py | 29 | 11291 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
The step size is set to 1 / (alpha_scaled + L + fit_intercept) for the
squared loss and to 4 / (4 * alpha_scaled + L + fit_intercept) for the log
loss, where L is the max sum of squares over all samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, where n_samples is the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
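Examples
--------
A quick numeric illustration of the formulas above (the input values are
arbitrary and chosen only to show the call; they are not meaningful):
>>> get_auto_step_size(4.0, 1.0, 'squared', fit_intercept=True)
0.16666666666666666
>>> get_auto_step_size(4.0, 1.0, 'log', fit_intercept=True)
0.4444444444444444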
"""
if loss in ('log', 'multinomial'):
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=None):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data with
sklearn.preprocessing.StandardScaler before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values. With loss='multinomial', y must be label encoded
(see preprocessing.LabelEncoder).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared' | 'multinomial'
Loss function that will be optimized:
-'log' is the binary logistic loss, as used in LogisticRegression.
-'squared' is the squared loss, as used in Ridge.
-'multinomial' is the multinomial logistic loss, as used in
LogisticRegression.
.. versionadded:: 0.18
*loss='multinomial'*
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter : int, optional
The maximum number of passes over the training data if the stopping
criterion is not reached. Defaults to 1000.
tol : double, optional
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to 0.001.
verbose : integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem : dict, optional
The initialization parameters used for warm starting. Warm starting is
currently used in LogisticRegression but not in Ridge.
It contains:
- 'coef': the weight vector, with the intercept in last line
if the intercept is fitted.
- 'gradient_memory': the scalar gradient for all seen samples.
- 'sum_gradient': the sum of gradient over all seen samples,
for each feature.
- 'intercept_sum_gradient': the sum of gradient over all seen
samples, for the intercept.
- 'seen': array of boolean describing the seen samples.
- 'num_seen': the number of seen samples.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and possibly the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
if warm_start_mem is None:
warm_start_mem = {}
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# if loss == 'multinomial', y should be label encoded.
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
# assume fit_intercept is False
coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.shape[0] == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1, :]
coef_init = coef_init[:-1, :]
else:
intercept_init = np.zeros(n_classes, dtype=np.float64)
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros((n_samples, n_classes),
dtype=np.float64, order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros((n_features, n_classes),
dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = row_norms(X, squared=True).max()
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
num_seen, n_iter_ = sag(dataset, coef_init,
intercept_init, n_samples,
n_features, n_classes, tol,
max_iter,
loss,
step_size, alpha_scaled,
sum_gradient_init,
gradient_memory_init,
seen_init,
num_seen_init,
fit_intercept,
intercept_sum_gradient,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
if fit_intercept:
coef_init = np.vstack((coef_init, intercept_init))
warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
if loss == 'multinomial':
coef_ = coef_init.T
else:
coef_ = coef_init[:, 0]
return coef_, n_iter_, warm_start_mem
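# A minimal sketch of calling this solver directly (illustrative only; in
# scikit-learn it is normally invoked through Ridge or LogisticRegression):
#
#     coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1., tol=1e-3)
#
# Here X is a 2D float64 array of shape (n_samples, n_features) and y a 1D
# float64 array of targets; the returned ``mem`` dict can be passed back via
# ``warm_start_mem`` to continue optimization from the previous state.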
| bsd-3-clause |
juliojsb/sarviewer | plotters/matplotlib/ram.py | 1 | 2409 | #!/usr/bin/env python2
"""
Author :Julio Sanz
Website :www.elarraydejota.com
Email :[email protected]
Description :Generate RAM graph from ram.dat file
Dependencies :Python 2.x, matplotlib
Usage :python ram.py
License :GPLv3
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import csv
from datetime import datetime
import matplotlib.dates
# ======================
# VARIABLES
# ======================
# Aesthetic parameters
plt.rcParams.update({'font.size': 8})
plt.rcParams['lines.linewidth'] = 1.5
time_format = matplotlib.dates.DateFormatter('%H:%M:%S')
plt.gca().xaxis.set_major_formatter(time_format)
plt.gcf().autofmt_xdate()
# Time (column 0)
x = []
# Memory data arrays
free_mem = []
used_mem = []
buffer_mem = []
cached_mem = []
# ======================
# FUNCTIONS
# ======================
def generate_graph():
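    # Note: the parsing below assumes each line of ram.dat is space-separated
    # with at least six fields, where field 0 is an HH:MM:SS timestamp and
    # fields 1, 2, 4 and 5 are free, used, buffer and cached memory in KB
    # (sar-style memory output is assumed). An illustrative line:
    # 10:30:01 812304 3267912 80.10 102400 901120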
with open('../../data/ram.dat', 'r') as csvfile:
data_source = csv.reader(csvfile, delimiter=' ', skipinitialspace=True)
for row in data_source:
# [0] column is a time column
# Convert to datetime data type
a = datetime.strptime((row[0]),'%H:%M:%S')
x.append((a))
# The remaining columns contain data
free_mem.append(str((int(row[1])/1024)+(int(row[4])/1024)+(int(row[5])/1024)))
used_mem.append(str((int(row[2])/1024)-(int(row[4])/1024)-(int(row[5])/1024)))
buffer_mem.append(str(int(row[4])/1024))
cached_mem.append(str(int(row[5])/1024))
# Plot lines
plt.plot(x,free_mem, label='Free', color='g', antialiased=True)
plt.plot(x,used_mem, label='Used', color='r', antialiased=True)
plt.plot(x,buffer_mem, label='Buffer', color='b', antialiased=True)
plt.plot(x,cached_mem, label='Cached', color='c', antialiased=True)
# Graph properties
plt.xlabel('Time',fontstyle='italic')
plt.ylabel('Memory (MB)',fontstyle='italic')
plt.title('RAM usage graph')
plt.grid(linewidth=0.4, antialiased=True)
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=2, fancybox=True, shadow=True)
plt.autoscale(True)
# Graph saved to PNG file
plt.savefig('../../graphs/ram.png', bbox_inches='tight')
#plt.show()
# ======================
# MAIN
# ======================
if __name__ == '__main__':
generate_graph() | gpl-3.0 |
ClimbsRocks/scikit-learn | sklearn/mixture/tests/test_gmm.py | 4 | 20668 | # These tests are those of the deprecated GMM class
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import (assert_greater, assert_raise_message,
assert_warns_message, ignore_warnings)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, spherecv, 'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = assert_warns_message(DeprecationWarning, "The function"
" log_multivariate_normal_density is "
"deprecated in 0.18 and will be removed in 0.20.",
mixture.log_multivariate_normal_density,
X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
# covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
with ignore_warnings(category=DeprecationWarning):
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
with ignore_warnings(category=DeprecationWarning):
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
with ignore_warnings(category=DeprecationWarning):
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
with ignore_warnings(category=DeprecationWarning):
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
with ignore_warnings(category=DeprecationWarning):
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
with ignore_warnings(category=DeprecationWarning):
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined
# distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
with ignore_warnings(category=DeprecationWarning):
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.dpgmm._DPGMMBase):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def score(self, g, X):
with ignore_warnings(category=DeprecationWarning):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_multiple_init():
# Test that multiple inits does not much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
with ignore_warnings(category=DeprecationWarning):
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_n_parameters():
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
with ignore_warnings(category=DeprecationWarning):
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
with ignore_warnings(category=DeprecationWarning):
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
while mathematically equivalent, was observed to raise a ``LinAlgError``
exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
# This function tests the deprecated old GMM class
@ignore_warnings(category=DeprecationWarning)
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
drammock/mne-python | mne/preprocessing/ica.py | 2 | 118763 | # -*- coding: utf-8 -*-
#
# Authors: Denis A. Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
# Juergen Dammers <[email protected]>
#
# License: BSD (3-clause)
from inspect import isfunction
from collections import namedtuple
from copy import deepcopy
from numbers import Integral
from time import time
import math
import os
import json
import numpy as np
from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
create_ecg_epochs)
from .eog import _find_eog_events, _get_eog_channel_index
from .infomax_ import infomax
from ..cov import compute_whitener
from .. import Covariance, Evoked
from ..io.pick import (pick_types, pick_channels, pick_info,
_picks_to_idx, _get_channel_types, _DATA_CH_TYPES_SPLIT)
from ..io.proj import make_projector
from ..io.write import (write_double_matrix, write_string,
write_name_list, write_int, start_block,
end_block)
from ..io.tree import dir_tree_find
from ..io.open import fiff_open
from ..io.tag import read_tag
from ..io.meas_info import write_meas_info, read_meas_info
from ..io.constants import FIFF
from ..io.base import BaseRaw
from ..io.eeglab.eeglab import _get_info, _check_load_mat
from ..epochs import BaseEpochs
from ..viz import (plot_ica_components, plot_ica_scores,
plot_ica_sources, plot_ica_overlay)
from ..viz.ica import plot_ica_properties
from ..viz.topomap import _plot_corrmap
from ..channels.channels import _contains_ch_type, ContainsMixin
from ..io.write import start_file, end_file, write_id
from ..utils import (check_version, logger, check_fname, verbose,
_reject_data_segments, check_random_state, _validate_type,
compute_corr, _get_inst_data, _ensure_int,
copy_function_doc_to_method_doc, _pl, warn, Bunch,
_check_preload, _check_compensation_grade, fill_doc,
_check_option, _PCA, int_like,
_check_all_same_channel_names)
from ..fixes import _get_args, _safe_svd
from ..filter import filter_data
from .bads import _find_outliers
from .ctps_ import ctps
from ..io.pick import pick_channels_regexp
__all__ = ('ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
'get_score_funcs', 'read_ica', 'read_ica_eeglab')
def _make_xy_sfunc(func, ndim_output=False):
"""Aux function."""
if ndim_output:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])[:, 0]
else:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])
sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
sfunc.__doc__ = func.__doc__
return sfunc
# Violate our assumption that the output is 1D so can't be used.
# Could eventually be added but probably not worth the effort unless someone
# requests it.
_BLOCKLIST = {'somersd'}
# makes score funcs attr accessible for users
def get_score_funcs():
"""Get the score functions.
Returns
-------
score_funcs : dict
The score functions.
"""
from scipy import stats
from scipy.spatial import distance
score_funcs = Bunch()
xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
if isfunction(f) and not n.startswith('_') and
n not in _BLOCKLIST]
xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
if isfunction(f) and not n.startswith('_') and
n not in _BLOCKLIST]
score_funcs.update({n: _make_xy_sfunc(f)
for n, f in xy_arg_dist_funcs
if _get_args(f) == ['u', 'v']})
score_funcs.update({n: _make_xy_sfunc(f, ndim_output=True)
for n, f in xy_arg_stats_funcs
if _get_args(f) == ['x', 'y']})
return score_funcs
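# Illustrative use of the returned callables (a sketch, not executed here;
# the exact set of keys depends on the installed scipy version). Each
# wrapped function scores every row of a 2D array against a 1D target:
#
#     funcs = get_score_funcs()
#     scores = funcs['pearsonr'](sources, target)  # if 'pearsonr' is present
#
# where ``sources`` has shape (n_components, n_times) and ``target`` is a
# 1D array with n_times entries.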
def _check_for_unsupported_ica_channels(picks, info, allow_ref_meg=False):
"""Check for channels in picks that are not considered valid channels.
Accepted channels are the data channels
('seeg', 'dbs', 'ecog', 'eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog'
and 'ref_meg'.
This prevents the program from crashing without
feedback when a bad channel is provided to ICA whitening.
"""
types = _DATA_CH_TYPES_SPLIT + ('eog',)
types += ('ref_meg',) if allow_ref_meg else ()
chs = _get_channel_types(info, picks, unique=True, only_data_chs=False)
check = all([ch in types for ch in chs])
if not check:
raise ValueError('Invalid channel type%s passed for ICA: %s. '
'Only the following types are supported: %s'
% (_pl(chs), chs, types))
_KNOWN_ICA_METHODS = ('fastica', 'infomax', 'picard')
@fill_doc
class ICA(ContainsMixin):
u"""Data decomposition using Independent Component Analysis (ICA).
This object estimates independent components from :class:`mne.io.Raw`,
:class:`mne.Epochs`, or :class:`mne.Evoked` objects. Components can
optionally be removed (for artifact repair) prior to signal reconstruction.
.. warning:: ICA is sensitive to low-frequency drifts and therefore
requires the data to be high-pass filtered prior to fitting.
Typically, a cutoff frequency of 1 Hz is recommended.
Parameters
----------
n_components : int | float | None
Number of principal components (from the pre-whitening PCA step) that
are passed to the ICA algorithm during fitting:
- :class:`int`
Must be greater than 1 and less than or equal to the number of
channels.
- :class:`float` between 0 and 1 (exclusive)
Will select the smallest number of components required to explain
the cumulative variance of the data greater than ``n_components``.
Consider this hypothetical example: we have 3 components, the first
explaining 70%%, the second 20%%, and the third the remaining 10%%
of the variance. Passing 0.8 here (corresponding to 80%% of
explained variance) would yield the first two components,
explaining 90%% of the variance: only by using both components the
requested threshold of 80%% explained variance can be exceeded. The
third component, on the other hand, would be excluded.
- ``None``
``0.999999`` will be used. This is done to avoid numerical
stability problems when whitening, particularly when working with
rank-deficient data.
Defaults to ``None``. The actual number used when executing the
:meth:`ICA.fit` method will be stored in the attribute
``n_components_`` (note the trailing underscore).
.. versionchanged:: 0.22
For a :class:`python:float`, the number of components will account
for *greater than* the given variance level instead of *less than or
equal to* it. The default (None) will also take into account the
rank deficiency of the data.
noise_cov : None | instance of Covariance
Noise covariance used for pre-whitening. If None (default), channels
are scaled to unit variance ("z-standardized") as a group by channel
type prior to the whitening by PCA.
%(random_state)s
As estimation can be non-deterministic it can be useful to fix the
random state to have reproducible results.
method : {'fastica', 'infomax', 'picard'}
The ICA method to use in the fit method. Use the ``fit_params`` argument
to set additional parameters. Specifically, if you want Extended
Infomax, set ``method='infomax'`` and ``fit_params=dict(extended=True)``
(this also works for ``method='picard'``). Defaults to ``'fastica'``.
For reference, see :footcite:`Hyvarinen1999,BellSejnowski1995,LeeEtAl1999,AblinEtAl2018`.
fit_params : dict | None
Additional parameters passed to the ICA estimator as specified by
``method``.
max_iter : int | 'auto'
Maximum number of iterations during fit. If ``'auto'``, it
will set maximum iterations to ``1000`` for ``'fastica'``
and to ``500`` for ``'infomax'`` or ``'picard'``. The actual number of
iterations it took :meth:`ICA.fit` to complete will be stored in the
``n_iter_`` attribute.
allow_ref_meg : bool
Allow ICA on MEG reference channels. Defaults to False.
.. versionadded:: 0.18
%(verbose)s
Attributes
----------
current_fit : str
Flag informing about which data type (raw or epochs) was used for the
fit.
ch_names : list-like
Channel names resulting from initial picking.
n_components_ : int
If fit, the actual number of PCA components used for ICA decomposition.
pre_whitener_ : ndarray, shape (n_channels, 1) or (n_channels, n_channels)
If fit, array used to pre-whiten the data prior to PCA.
pca_components_ : ndarray, shape ``(n_channels, n_channels)``
If fit, the PCA components.
pca_mean_ : ndarray, shape (n_channels,)
If fit, the mean vector used to center the data before doing the PCA.
pca_explained_variance_ : ndarray, shape ``(n_channels,)``
If fit, the variance explained by each PCA component.
mixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened mixing matrix to go back from ICA space to PCA
space.
It is, in combination with the ``pca_components_``, used by
:meth:`ICA.apply` and :meth:`ICA.get_components` to re-mix/project
a subset of the ICA components into the observed channel space.
The former method also removes the pre-whitening (z-scaling) and the
de-meaning.
unmixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened matrix to go from PCA space to ICA space.
Used, in combination with the ``pca_components_``, by the methods
:meth:`ICA.get_sources` and :meth:`ICA.apply` to unmix the observed
data.
exclude : array-like of int
List or np.array of sources indices to exclude when re-mixing the data
in the :meth:`ICA.apply` method, i.e. artifactual ICA components.
The components identified manually and by the various automatic
artifact detection methods should be (manually) appended
(e.g. ``ica.exclude.extend(eog_inds)``).
(There is also an ``exclude`` parameter in the :meth:`ICA.apply`
method.) To scrap all marked components, set this attribute to an empty
list.
info : None | instance of Info
The measurement info copied from the object fitted.
n_samples_ : int
The number of samples used on fit.
labels_ : dict
A dictionary of independent component indices, grouped by types of
independent components. This attribute is set by some of the artifact
detection functions.
n_iter_ : int
If fit, the number of iterations required to complete ICA.
Notes
-----
.. versionchanged:: 0.23
Version 0.23 introduced the ``max_iter='auto'`` setting for maximum
iterations. With version 0.24 ``'auto'`` will be the new
default, replacing the current ``max_iter=200``.
.. versionchanged:: 0.23
Warn if `~mne.Epochs` were baseline-corrected.
.. note:: If you intend to fit ICA on `~mne.Epochs`, it is
recommended to high-pass filter, but **not** baseline correct the
data for good ICA performance. A warning will be emitted
otherwise.
A trailing ``_`` in an attribute name signifies that the attribute was
added to the object during fitting, consistent with standard scikit-learn
practice.
ICA :meth:`fit` in MNE proceeds in two steps:
1. :term:`Whitening <whitening>` the data by means of a pre-whitening step
(using ``noise_cov`` if provided, or the standard deviation of each
channel type) and then principal component analysis (PCA).
2. Passing the ``n_components`` largest-variance components to the ICA
algorithm to obtain the unmixing matrix (and by pseudoinversion, the
mixing matrix).
ICA :meth:`apply` then:
1. Unmixes the data with the ``unmixing_matrix_``.
2. Includes ICA components based on ``ica.include`` and ``ica.exclude``.
3. Re-mixes the data with ``mixing_matrix_``.
4. Restores any data not passed to the ICA algorithm, i.e., the PCA
components between ``n_components`` and ``n_pca_components``.
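Putting :meth:`fit` and :meth:`apply` together, a typical artifact-repair
pass might look like the following sketch (illustrative only; ``raw`` is
assumed to be an already high-pass-filtered :class:`~mne.io.Raw` instance
and the excluded component index is hypothetical)::
    >> ica = ICA(n_components=20, random_state=97)
    >> ica.fit(raw)
    >> ica.exclude = [0]  # component identified as an artifact
    >> raw_clean = ica.apply(raw.copy())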
``n_pca_components`` determines how many PCA components will be kept when
reconstructing the data with :meth:`apply`. This parameter can be
used for dimensionality reduction of the data, or dealing with low-rank
data (such as those with projections, or MEG data processed by SSS). It is
important to remove any numerically-zero-variance components in the data,
otherwise numerical instability causes problems when computing the mixing
matrix. Alternatively, using ``n_components`` as a float will also avoid
numerical stability problems.
The ``n_components`` parameter determines how many components out of
the ``n_channels`` PCA components the ICA algorithm will actually fit.
This is not typically used for EEG data, but for MEG data, it's common to
use ``n_components < n_channels``. For example, full-rank
306-channel MEG data might use ``n_components=40`` to find (and
later exclude) only large, dominating artifacts in the data, but still
reconstruct the data using all 306 PCA components. Setting
``n_pca_components=40``, on the other hand, would actually reduce the
rank of the reconstructed data to 40, which is typically undesirable.
If you are migrating from EEGLAB and intend to reduce dimensionality via
PCA, similarly to EEGLAB's ``runica(..., 'pca', n)`` functionality,
pass ``n_components=n`` during initialization and then
``n_pca_components=n`` during :meth:`apply`. The resulting reconstructed
data after :meth:`apply` will have rank ``n``.
.. note:: Commonly used for reasons of i) computational efficiency and
ii) additional noise reduction, it is a matter of current debate
whether pre-ICA dimensionality reduction could decrease the
reliability and stability of the ICA, at least for EEG data and
especially during preprocessing :footcite:`ArtoniEtAl2018`.
(But see also :footcite:`Montoya-MartinezEtAl2017` for a
possibly confounding effect of the different whitening/sphering
methods used in this paper (ZCA vs. PCA).)
On the other hand, for rank-deficient data such as EEG data after
average reference or interpolation, it is recommended to reduce
the dimensionality (by 1 for average reference and 1 for each
interpolated channel) for optimal ICA performance (see the
`EEGLAB wiki <eeglab_wiki_>`_).
Caveat! If supplying a noise covariance, keep track of the projections
available in the cov or in the raw object. For example, if you are
interested in EOG or ECG artifacts, EOG and ECG projections should be
temporally removed before fitting ICA, for example::
>> projs, raw.info['projs'] = raw.info['projs'], []
>> ica.fit(raw)
>> raw.info['projs'] = projs
Methods currently implemented are FastICA (default), Infomax, and Picard.
Standard Infomax can be quite sensitive to differences in floating point
arithmetic. Extended Infomax seems to be more stable in this respect,
enhancing reproducibility and stability of results; use Extended Infomax
via ``method='infomax', fit_params=dict(extended=True)``. Allowed entries
in ``fit_params`` are determined by the various algorithm implementations:
see :class:`~sklearn.decomposition.FastICA`, :func:`~picard.picard`,
:func:`~mne.preprocessing.infomax`.
.. note:: Picard can be used to solve the same problems as FastICA,
Infomax, and extended Infomax, but typically converges faster
than either of those methods. To make use of Picard's speed while
still obtaining the same solution as with other algorithms, you
need to specify ``method='picard'`` and ``fit_params`` as a
dictionary with the following combination of keys:
- ``dict(ortho=False, extended=False)`` for Infomax
- ``dict(ortho=False, extended=True)`` for extended Infomax
- ``dict(ortho=True, extended=True)`` for FastICA
Reducing the tolerance (set in ``fit_params``) speeds up estimation at the
cost of consistency of the obtained results. It is difficult to directly
compare tolerance levels between Infomax and Picard, but for Picard and
FastICA a good rule of thumb is ``tol_fastica == tol_picard ** 2``.
.. _eeglab_wiki: https://eeglab.org/tutorials/06_RejectArtifacts/RunICA.html#how-to-deal-with-corrupted-ica-decompositions
References
----------
.. footbibliography::
""" # noqa: E501
@verbose
def __init__(self, n_components=None, *, noise_cov=None,
random_state=None, method='fastica', fit_params=None,
max_iter='auto', allow_ref_meg=False,
verbose=None): # noqa: D102
_validate_type(method, str, 'method')
_validate_type(n_components, (float, 'int-like', None))
if method != 'imported_eeglab': # internal use only
_check_option('method', method, _KNOWN_ICA_METHODS)
if method == 'fastica' and not check_version('sklearn'):
raise ImportError(
'The scikit-learn package is required for method="fastica".')
if method == 'picard' and not check_version('picard'):
raise ImportError(
'The python-picard package is required for method="picard".')
self.noise_cov = noise_cov
for (kind, val) in [('n_components', n_components)]:
if isinstance(val, float) and not 0 < val < 1:
raise ValueError('Selecting ICA components by explained '
'variance needs values between 0.0 and 1.0 '
f'(exclusive), got {kind}={val}')
if isinstance(val, int_like) and val == 1:
raise ValueError(
f'Selecting one component with {kind}={val} is not '
'supported')
self.current_fit = 'unfitted'
self.verbose = verbose
self.n_components = n_components
# In newer ICAs this should always be None, but keep it for
# backward compat with older versions of MNE that used it
self._max_pca_components = None
self.n_pca_components = None
self.ch_names = None
self.random_state = random_state
if fit_params is None:
fit_params = {}
fit_params = deepcopy(fit_params) # avoid side effects
if method == 'fastica':
update = {'algorithm': 'parallel', 'fun': 'logcosh',
'fun_args': None}
fit_params.update({k: v for k, v in update.items() if k
not in fit_params})
elif method == 'infomax':
# extended=True is default in underlying function, but we want
# default False here unless user specified True:
fit_params.setdefault('extended', False)
_validate_type(max_iter, (str, 'int-like'), 'max_iter')
if isinstance(max_iter, str):
_check_option('max_iter', max_iter, ('auto',), 'when str')
if method == 'fastica':
max_iter = 1000
elif method in ['infomax', 'picard']:
max_iter = 500
fit_params.setdefault('max_iter', max_iter)
self.max_iter = max_iter
self.fit_params = fit_params
self.exclude = []
self.info = None
self.method = method
self.labels_ = dict()
self.allow_ref_meg = allow_ref_meg
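    # Given how the defaults are resolved above, these two constructions are
    # equivalent (illustrative only):
    #
    #     ICA(method='fastica', max_iter='auto')
    #     ICA(method='fastica',
    #         fit_params=dict(algorithm='parallel', fun='logcosh',
    #                         fun_args=None, max_iter=1000))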
def __repr__(self):
"""ICA fit information."""
if self.current_fit == 'unfitted':
s = 'no'
elif self.current_fit == 'raw':
s = 'raw data'
else:
s = 'epochs'
s += ' decomposition, '
s += 'fit (%s): %s samples, ' % (self.method,
str(getattr(self, 'n_samples_', '')))
s += ('%s components' % str(self.n_components_) if
hasattr(self, 'n_components_') else
'no dimension reduction')
if self.info is not None:
ch_fit = ['"%s"' % c for c in _DATA_CH_TYPES_SPLIT if c in self]
s += ', channels used: {}'.format('; '.join(ch_fit))
if self.exclude:
s += ', %i sources marked for exclusion' % len(self.exclude)
return '<ICA | %s>' % s
@verbose
def fit(self, inst, picks=None, start=None, stop=None, decim=None,
reject=None, flat=None, tstep=2.0, reject_by_annotation=True,
verbose=None):
"""Run the ICA decomposition on raw data.
        Caveat! If supplying a noise covariance, keep track of the projections
        available in the cov, the raw or the epochs object. For example,
        if you are interested in EOG or ECG artifacts, EOG and ECG projections
        should be temporarily removed before fitting the ICA.
Parameters
----------
inst : instance of Raw or Epochs
The data to be decomposed.
%(picks_good_data_noref)s
This selection remains throughout the initialized ICA solution.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
decim : int | None
Increment for selecting each nth time slice. If None, all samples
within ``start`` and ``stop`` are used.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog',
'ecg', 'hbo', 'hbr'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
It only applies if ``inst`` is of type Raw.
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad', 'mag', 'eeg', 'seeg', 'dbs', 'ecog', 'eog',
'ecg', 'hbo', 'hbr'.
Values are floats that set the minimum acceptable peak-to-peak
amplitude. If flat is None then no rejection is done.
It only applies if ``inst`` is of type Raw.
tstep : float
Length of data chunks for artifact rejection in seconds.
It only applies if ``inst`` is of type Raw.
%(reject_by_annotation_raw)s
.. versionadded:: 0.14.0
%(verbose_meth)s
Returns
-------
self : instance of ICA
Returns the modified instance.
"""
_validate_type(inst, (BaseRaw, BaseEpochs), 'inst', 'Raw or Epochs')
if np.isclose(inst.info['highpass'], 0.):
warn('The data has not been high-pass filtered. For good ICA '
'performance, it should be high-pass filtered (e.g., with a '
'1.0 Hz lower bound) before fitting ICA.')
if isinstance(inst, BaseEpochs) and inst.baseline is not None:
warn('The epochs you passed to ICA.fit() were baseline-corrected. '
                 'However, we suggest fitting ICA only on data that has been '
'high-pass filtered, but NOT baseline-corrected.')
picks = _picks_to_idx(inst.info, picks, allow_empty=False,
with_ref_meg=self.allow_ref_meg)
_check_for_unsupported_ica_channels(
picks, inst.info, allow_ref_meg=self.allow_ref_meg)
# Actually start fitting
t_start = time()
if self.current_fit != 'unfitted':
self._reset()
logger.info('Fitting ICA to data using %i channels '
'(please be patient, this may take a while)' % len(picks))
# n_components could be float 0 < x < 1, but that's okay here
if self.n_components is not None and self.n_components > len(picks):
raise ValueError(
f'ica.n_components ({self.n_components}) cannot '
f'be greater than len(picks) ({len(picks)})')
# filter out all the channels the raw wouldn't have initialized
self.info = pick_info(inst.info, picks)
if self.info['comps']:
self.info['comps'] = []
self.ch_names = self.info['ch_names']
if isinstance(inst, BaseRaw):
self._fit_raw(inst, picks, start, stop, decim, reject, flat,
tstep, reject_by_annotation, verbose)
else:
assert isinstance(inst, BaseEpochs)
self._fit_epochs(inst, picks, decim, verbose)
# sort ICA components by explained variance
var = _ica_explained_variance(self, inst)
var_ord = var.argsort()[::-1]
_sort_components(self, var_ord, copy=False)
t_stop = time()
logger.info("Fitting ICA took {:.1f}s.".format(t_stop - t_start))
return self
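    # Minimal usage sketch (``raw`` is an assumed, already high-pass filtered
    # Raw instance; the parameter values are placeholders):
    #
    #     ica = ICA(n_components=20, max_iter='auto', random_state=97)
    #     ica.fit(raw, picks='meg', decim=3,
    #             reject=dict(mag=4e-12, grad=4000e-13))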
def _reset(self):
"""Aux method."""
for key in ('pre_whitener_', 'unmixing_matrix_', 'mixing_matrix_',
'n_components_', 'n_samples_', 'pca_components_',
'pca_explained_variance_',
'pca_mean_', 'n_iter_', 'drop_inds_', 'reject_'):
if hasattr(self, key):
delattr(self, key)
def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
reject_by_annotation, verbose):
"""Aux method."""
start, stop = _check_start_stop(raw, start, stop)
reject_by_annotation = 'omit' if reject_by_annotation else None
# this will be a copy
data = raw.get_data(picks, start, stop, reject_by_annotation)
# this will be a view
if decim is not None:
data = data[:, ::decim]
# this will make a copy
if (reject is not None) or (flat is not None):
self.reject_ = reject
data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
decim, self.info,
tstep)
self.n_samples_ = data.shape[1]
self._fit(data, 'raw')
return self
def _fit_epochs(self, epochs, picks, decim, verbose):
"""Aux method."""
if epochs.events.size == 0:
raise RuntimeError('Tried to fit ICA with epochs, but none were '
'found: epochs.events is "{}".'
.format(epochs.events))
# this should be a copy (picks a list of int)
data = epochs.get_data()[:, picks]
# this will be a view
if decim is not None:
data = data[:, :, ::decim]
self.n_samples_ = data.shape[0] * data.shape[2]
# This will make at least one copy (one from hstack, maybe one
# more from _pre_whiten)
data = np.hstack(data)
self._fit(data, 'epochs')
return self
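    # Shape bookkeeping for the reshaping above (illustrative numbers):
    # epochs.get_data() of shape (30 epochs, 60 channels, 500 times), fully
    # picked and hstacked, becomes a (60, 30 * 500) array, so
    # n_samples_ == 15000.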
def _compute_pre_whitener(self, data):
"""Aux function."""
data = self._do_proj(data, log_suffix='(pre-whitener computation)')
if self.noise_cov is None:
# use standardization as whitener
# Scale (z-score) the data by channel type
info = self.info
pre_whitener = np.empty([len(data), 1])
for ch_type in _DATA_CH_TYPES_SPLIT + ('eog', "ref_meg"):
if _contains_ch_type(info, ch_type):
if ch_type == 'seeg':
this_picks = pick_types(info, meg=False, seeg=True)
elif ch_type == 'dbs':
this_picks = pick_types(info, meg=False, dbs=True)
elif ch_type == 'ecog':
this_picks = pick_types(info, meg=False, ecog=True)
elif ch_type == 'eeg':
this_picks = pick_types(info, meg=False, eeg=True)
elif ch_type in ('mag', 'grad'):
this_picks = pick_types(info, meg=ch_type)
elif ch_type == 'eog':
this_picks = pick_types(info, meg=False, eog=True)
elif ch_type in ('hbo', 'hbr'):
this_picks = pick_types(info, meg=False, fnirs=ch_type)
elif ch_type == 'ref_meg':
this_picks = pick_types(info, meg=False, ref_meg=True)
else:
                        raise RuntimeError('Should not be reached. '
'Unsupported channel {}'
.format(ch_type))
pre_whitener[this_picks] = np.std(data[this_picks])
else:
pre_whitener, _ = compute_whitener(self.noise_cov, self.info)
assert data.shape[0] == pre_whitener.shape[1]
self.pre_whitener_ = pre_whitener
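    # Sketch of the two pre-whitening branches above (assumed shapes):
    # without a noise covariance, ``pre_whitener_`` is an (n_channels, 1)
    # column of per-channel-type standard deviations that the data are later
    # divided by; with ``noise_cov`` it is the (n_channels, n_channels)
    # whitening matrix from compute_whitener(), applied as a matrix product.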
def _do_proj(self, data, log_suffix=''):
if self.info is not None and self.info['projs']:
proj, nproj, _ = make_projector(
[p for p in self.info['projs'] if p['active']],
self.info['ch_names'], include_active=True)
if nproj:
logger.info(
f' Applying projection operator with {nproj} '
f'vector{_pl(nproj)}'
f'{" " if log_suffix else ""}{log_suffix}')
if self.noise_cov is None: # otherwise it's in pre_whitener_
data = proj @ data
return data
def _pre_whiten(self, data):
data = self._do_proj(data, log_suffix='(pre-whitener application)')
if self.noise_cov is None:
data /= self.pre_whitener_
else:
data = self.pre_whitener_ @ data
return data
def _fit(self, data, fit_type):
"""Aux function."""
random_state = check_random_state(self.random_state)
n_channels, n_samples = data.shape
self._compute_pre_whitener(data)
data = self._pre_whiten(data)
pca = _PCA(n_components=self._max_pca_components, whiten=True)
data = pca.fit_transform(data.T)
use_ev = pca.explained_variance_ratio_
n_pca = self.n_pca_components
if isinstance(n_pca, float):
n_pca = int(_exp_var_ncomp(use_ev, n_pca)[0])
elif n_pca is None:
n_pca = len(use_ev)
assert isinstance(n_pca, (int, np.int_))
# If user passed a float, select the PCA components explaining the
# given cumulative variance. This information will later be used to
# only submit the corresponding parts of the data to ICA.
if self.n_components is None:
# None case: check if n_pca_components or 0.999999 yields smaller
msg = 'Selecting by non-zero PCA components'
self.n_components_ = min(
n_pca, _exp_var_ncomp(use_ev, 0.999999)[0])
elif isinstance(self.n_components, float):
self.n_components_, ev = _exp_var_ncomp(use_ev, self.n_components)
if self.n_components_ == 1:
raise RuntimeError(
'One PCA component captures most of the '
f'explained variance ({100 * ev}%), your threshold '
'results in 1 component. You should select '
'a higher value.')
msg = 'Selecting by explained variance'
else:
msg = 'Selecting by number'
self.n_components_ = _ensure_int(self.n_components)
# check to make sure something okay happened
if self.n_components_ > n_pca:
ev = np.cumsum(use_ev)
ev /= ev[-1]
evs = 100 * ev[[self.n_components_ - 1, n_pca - 1]]
raise RuntimeError(
f'n_components={self.n_components} requires '
f'{self.n_components_} PCA values (EV={evs[0]:0.1f}%) but '
f'n_pca_components ({self.n_pca_components}) results in '
f'only {n_pca} components (EV={evs[1]:0.1f}%)')
logger.info('%s: %s components' % (msg, self.n_components_))
# the things to store for PCA
self.pca_mean_ = pca.mean_
self.pca_components_ = pca.components_
self.pca_explained_variance_ = pca.explained_variance_
del pca
# update number of components
self._update_ica_names()
if self.n_pca_components is not None and \
self.n_pca_components > len(self.pca_components_):
raise ValueError(
f'n_pca_components ({self.n_pca_components}) is greater than '
f'the number of PCA components ({len(self.pca_components_)})')
# take care of ICA
sel = slice(0, self.n_components_)
if self.method == 'fastica':
from sklearn.decomposition import FastICA
ica = FastICA(
whiten=False, random_state=random_state, **self.fit_params)
ica.fit(data[:, sel])
self.unmixing_matrix_ = ica.components_
self.n_iter_ = ica.n_iter_
elif self.method in ('infomax', 'extended-infomax'):
unmixing_matrix, n_iter = infomax(
data[:, sel], random_state=random_state, return_n_iter=True,
**self.fit_params)
self.unmixing_matrix_ = unmixing_matrix
self.n_iter_ = n_iter
del unmixing_matrix, n_iter
elif self.method == 'picard':
from picard import picard
_, W, _, n_iter = picard(
data[:, sel].T, whiten=False, return_n_iter=True,
random_state=random_state, **self.fit_params)
self.unmixing_matrix_ = W
self.n_iter_ = n_iter + 1 # picard() starts counting at 0
del _, n_iter
assert self.unmixing_matrix_.shape == (self.n_components_,) * 2
norms = self.pca_explained_variance_
stable = norms / norms[0] > 1e-6 # to be stable during pinv
norms = norms[:self.n_components_]
if not stable[self.n_components_ - 1]:
max_int = np.where(stable)[0][-1] + 1
warn(f'Using n_components={self.n_components} (resulting in '
f'n_components_={self.n_components_}) may lead to an '
f'unstable mixing matrix estimation because the ratio '
f'between the largest ({norms[0]:0.2g}) and smallest '
f'({norms[-1]:0.2g}) variances is too large (> 1e6); '
f'consider setting n_components=0.999999 or an '
f'integer <= {max_int}')
norms = np.sqrt(norms)
norms[norms == 0] = 1.
self.unmixing_matrix_ /= norms # whitening
self._update_mixing_matrix()
self.current_fit = fit_type
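    # Shape bookkeeping for the fit above (illustrative numbers): with 60
    # channels, n_components_ == 20 and n_pca == 60, ``pca_components_`` is
    # (60, 60), the whitened PCA scores handed to the ICA solver are
    # (n_samples, 20), and ``unmixing_matrix_`` ends up (20, 20) as asserted.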
def _update_mixing_matrix(self):
from scipy import linalg
self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
def _update_ica_names(self):
"""Update ICA names when n_components_ is set."""
self._ica_names = ['ICA%03d' % ii for ii in range(self.n_components_)]
def _transform(self, data):
"""Compute sources from data (operates inplace)."""
data = self._pre_whiten(data)
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
# Apply unmixing
pca_data = np.dot(self.unmixing_matrix_,
self.pca_components_[:self.n_components_])
# Apply PCA
sources = np.dot(pca_data, data)
return sources
def _transform_raw(self, raw, start, stop, reject_by_annotation=False):
"""Transform raw data."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Raw doesn\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Raw compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
reject = 'omit' if reject_by_annotation else None
data = raw.get_data(picks, start, stop, reject)
return self._transform(data)
def _transform_epochs(self, epochs, concatenate):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(epochs.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data = np.hstack(epochs.get_data()[:, picks])
sources = self._transform(data)
if not concatenate:
# Put the data back in 3D
sources = np.array(np.split(sources, len(epochs.events), 1))
return sources
def _transform_evoked(self, evoked):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = pick_types(evoked.info, include=self.ch_names, exclude='bads',
meg=False, ref_meg=False)
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked doesn\'t match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide Evoked compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
sources = self._transform(evoked.data[picks])
return sources
def get_components(self):
"""Get ICA topomap for components as numpy arrays.
Returns
-------
components : array, shape (n_channels, n_components)
The ICA components (maps).
"""
return np.dot(self.mixing_matrix_[:, :self.n_components_].T,
self.pca_components_[:self.n_components_]).T
def get_sources(self, inst, add_channels=None, start=None, stop=None):
"""Estimate sources given the unmixing matrix.
This method will return the sources in the container format passed.
        Typical use cases:
1. pass Raw object to use `raw.plot <mne.io.Raw.plot>` for ICA sources
2. pass Epochs object to compute trial-based statistics in ICA space
3. pass Evoked object to investigate time-locking in ICA space
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from and to represent sources in.
add_channels : None | list of str
Additional channels to be added. Useful to e.g. compare sources
with some reference. Defaults to None.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
Returns
-------
sources : instance of Raw, Epochs or Evoked
The ICA sources time series.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._sources_as_raw(inst, add_channels, start, stop)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._sources_as_epochs(inst, add_channels, False)
elif isinstance(inst, Evoked):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._sources_as_evoked(inst, add_channels)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return sources
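    # Typical call pattern (``raw`` and a fitted ``ica`` are assumed;
    # 'EOG 061' is a placeholder channel name):
    #
    #     sources = ica.get_sources(raw, add_channels=['EOG 061'])
    #     sources.plot()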
def _sources_as_raw(self, raw, add_channels, start, stop):
"""Aux method."""
# merge copied instance and picked data with sources
start, stop = _check_start_stop(raw, start, stop)
data_ = self._transform_raw(raw, start=start, stop=stop)
assert data_.shape[1] == stop - start
if raw.preload: # get data and temporarily delete
data = raw._data
del raw._data
out = raw.copy() # copy and reappend
if raw.preload:
raw._data = data
# populate copied raw.
if add_channels is not None and len(add_channels):
picks = pick_channels(raw.ch_names, add_channels)
data_ = np.concatenate([
data_, raw.get_data(picks, start=start, stop=stop)])
out._data = data_
out._filenames = [None]
out.preload = True
out._first_samps[:] = [out.first_samp + start]
out._last_samps[:] = [out.first_samp + data_.shape[1] - 1]
out._projector = None
self._export_info(out.info, raw, add_channels)
return out
def _sources_as_epochs(self, epochs, add_channels, concatenate):
"""Aux method."""
out = epochs.copy()
sources = self._transform_epochs(epochs, concatenate)
if add_channels is not None:
picks = [epochs.ch_names.index(k) for k in add_channels]
else:
picks = []
out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
axis=1) if len(picks) > 0 else sources
self._export_info(out.info, epochs, add_channels)
out.preload = True
out._raw = None
out._projector = None
return out
def _sources_as_evoked(self, evoked, add_channels):
"""Aux method."""
if add_channels is not None:
picks = [evoked.ch_names.index(k) for k in add_channels]
else:
picks = []
sources = self._transform_evoked(evoked)
        if len(picks) > 0:
data = np.r_[sources, evoked.data[picks]]
else:
data = sources
out = evoked.copy()
out.data = data
self._export_info(out.info, evoked, add_channels)
return out
def _export_info(self, info, container, add_channels):
"""Aux method."""
# set channel names and info
ch_names = []
ch_info = info['chs'] = []
for ii, name in enumerate(self._ica_names):
ch_names.append(name)
ch_info.append(dict(
ch_name=name, cal=1, logno=ii + 1,
coil_type=FIFF.FIFFV_COIL_NONE, kind=FIFF.FIFFV_MISC_CH,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_NONE,
loc=np.zeros(12, dtype='f4'),
range=1.0, scanno=ii + 1, unit_mul=0))
if add_channels is not None:
# re-append additionally picked ch_names
ch_names += add_channels
# re-append additionally picked ch_info
ch_info += [k for k in container.info['chs'] if k['ch_name'] in
add_channels]
info['bads'] = [ch_names[k] for k in self.exclude]
info['projs'] = [] # make sure projections are removed.
info._update_redundant()
info._check_consistency()
@verbose
def score_sources(self, inst, target=None, score_func='pearsonr',
start=None, stop=None, l_freq=None, h_freq=None,
reject_by_annotation=True, verbose=None):
"""Assign score to components based on statistic or metric.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The object to reconstruct the sources from.
target : array-like | str | None
Signal to which the sources shall be compared. It has to be of
the same shape as the sources. If str, a routine will try to find
a matching channel name. If None, a score
function expecting only one input-array argument must be used,
for instance, scipy.stats.skew (default).
score_func : callable | str
Callable taking as arguments either two input arrays
(e.g. Pearson correlation) or one input
array (e. g. skewness) and returns a float. For convenience the
most common score_funcs are available via string labels:
            Currently, all distance metrics from scipy.spatial and all
            functions from scipy.stats taking compatible input arguments are
            supported. These functions have been modified to support
            iteration over the rows of a 2D array.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
%(reject_by_annotation_all)s
.. versionadded:: 0.14.0
%(verbose_meth)s
Returns
-------
scores : ndarray
Scores for each source as returned from score_func.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._transform_raw(inst, start, stop,
reject_by_annotation)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._transform_epochs(inst, concatenate=True)
elif isinstance(inst, Evoked):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._transform_evoked(inst)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
if target is not None: # we can have univariate metrics without target
target = self._check_target(target, inst, start, stop,
reject_by_annotation)
if sources.shape[-1] != target.shape[-1]:
raise ValueError('Sources and target do not have the same '
'number of time slices.')
# auto target selection
if isinstance(inst, BaseRaw):
# We pass inst, not self, because the sfreq of the data we
# use for scoring components can be different:
sources, target = _band_pass_filter(inst, sources, target,
l_freq, h_freq)
scores = _find_sources(sources, target, score_func)
return scores
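    # Example (``raw`` with an EOG channel named 'EOG 061' is assumed):
    #
    #     scores = ica.score_sources(raw, target='EOG 061',
    #                                score_func='pearsonr',
    #                                l_freq=1, h_freq=10)
    #     # scores has shape (n_components_,)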
def _check_target(self, target, inst, start, stop,
reject_by_annotation=False):
"""Aux Method."""
if isinstance(inst, BaseRaw):
reject_by_annotation = 'omit' if reject_by_annotation else None
start, stop = _check_start_stop(inst, start, stop)
if hasattr(target, 'ndim'):
if target.ndim < 2:
target = target.reshape(1, target.shape[-1])
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.get_data(pick, start, stop, reject_by_annotation)
elif isinstance(inst, BaseEpochs):
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.get_data()[:, pick]
if hasattr(target, 'ndim'):
if target.ndim == 3 and min(target.shape) == 1:
target = target.ravel()
elif isinstance(inst, Evoked):
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.data[pick]
return target
def _find_bads_ch(self, inst, chs, threshold=3.0, start=None,
stop=None, l_freq=None, h_freq=None,
reject_by_annotation=True, prefix='chs',
measure='zscore'):
"""Compute ExG/ref components.
See find_bads_ecg, find_bads_eog, and find_bads_ref for details.
"""
scores, idx = [], []
# some magic we need inevitably ...
# get targets before equalizing
targets = [self._check_target(
ch, inst, start, stop, reject_by_annotation) for ch in chs]
# assign names, if targets are arrays instead of strings
target_names = []
for ch in chs:
if not isinstance(ch, str):
if prefix == "ecg":
target_names.append('ECG-MAG')
else:
target_names.append(prefix)
else:
target_names.append(ch)
for ii, (ch, target) in enumerate(zip(target_names, targets)):
scores += [self.score_sources(
inst, target=target, score_func='pearsonr', start=start,
stop=stop, l_freq=l_freq, h_freq=h_freq,
reject_by_annotation=reject_by_annotation)]
# pick last scores
if measure == "zscore":
this_idx = _find_outliers(scores[-1], threshold=threshold)
elif measure == "correlation":
this_idx = np.where(abs(scores[-1]) > threshold)[0]
else:
raise ValueError("Unknown measure {}".format(measure))
idx += [this_idx]
self.labels_['%s/%i/' % (prefix, ii) + ch] = list(this_idx)
# remove duplicates but keep order by score, even across multiple
# ref channels
scores_ = np.concatenate([scores[ii][inds]
for ii, inds in enumerate(idx)])
idx_ = np.concatenate(idx)[np.abs(scores_).argsort()[::-1]]
idx_unique = list(np.unique(idx_))
idx = []
for i in idx_:
if i in idx_unique:
idx.append(i)
idx_unique.remove(i)
if len(scores) == 1:
scores = scores[0]
labels = list(idx)
return labels, scores
def _get_ctps_threshold(self, pk_threshold=20):
"""Automatically decide the threshold of Kuiper index for CTPS method.
        This function finds the threshold of the Kuiper index based on the
        threshold of pk: the Kuiper statistic that minimizes the difference
        between pk and the pk threshold (defaults to 20 [1]) is returned.
        It is assumed
that the data are appropriately filtered and bad data are rejected at
least based on peak-to-peak amplitude when/before running the ICA
decomposition on data.
References
----------
[1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
and phase statistics for complete artifact removal in independent
components of neuromagnetic recordings. Biomedical
        Engineering, IEEE Transactions on 55 (10), 2353-2362.
"""
N = self.info['sfreq']
Vs = np.arange(1, 100) / 100
C = math.sqrt(N) + 0.155 + 0.24 / math.sqrt(N)
# in formula (13), when k gets large, only k=1 matters for the
# summation. k*V*C thus becomes V*C
Pks = 2 * (4 * (Vs * C)**2 - 1) * (np.exp(-2 * (Vs * C)**2))
# NOTE: the threshold of pk is transformed to Pk for comparison
# pk = -log10(Pk)
return Vs[np.argmin(np.abs(Pks - 10**(-pk_threshold)))]
@verbose
def find_bads_ecg(self, inst, ch_name=None, threshold='auto', start=None,
stop=None, l_freq=8, h_freq=16, method='ctps',
reject_by_annotation=True, measure='zscore',
verbose=None):
"""Detect ECG related components.
Cross-trial phase statistics (default) or Pearson correlation can be
used for detection.
        .. note:: If no ECG channel is available, the routine attempts to
                  create an artificial ECG based on cross-channel averaging.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for ECG peak detection.
The argument is mandatory if the dataset contains no ECG
channels.
threshold : float | str
The value above which a feature is classified as outlier. If 'auto'
and method is 'ctps', automatically compute the threshold. If
'auto' and method is 'correlation', defaults to 3.0. The default
translates to 0.25 for 'ctps' and 3.0 for 'correlation' in version
0.21 but will change to 'auto' in version 0.22.
.. versionchanged:: 0.21
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
method : {'ctps', 'correlation'}
The method used for detection. If 'ctps', cross-trial phase
statistics [1] are used to detect ECG related components.
Thresholding is then based on the significance value of a Kuiper
statistic.
If 'correlation', detection is based on Pearson correlation
between the filtered data and the filtered ECG channel.
Thresholding is based on iterative z-scoring. The above
threshold components will be masked and the z-score will
be recomputed until no supra-threshold component remains.
Defaults to 'ctps'.
%(reject_by_annotation_all)s
.. versionadded:: 0.14.0
measure : 'zscore' | 'correlation'
Which method to use for finding outliers. ``'zscore'`` (default) is
the iterated Z-scoring method, and ``'correlation'`` is an absolute
raw correlation threshold with a range of 0 to 1.
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
ecg_idx : list of int
The indices of ECG-related components.
scores : np.ndarray of float, shape (``n_components_``)
If method is 'ctps', the normalized Kuiper index scores. If method
is 'correlation', the correlation scores.
See Also
--------
find_bads_eog, find_bads_ref
References
----------
[1] Dammers, J., Schiek, M., Boers, F., Silex, C., Zvyagintsev,
M., Pietrzyk, U., Mathiak, K., 2008. Integration of amplitude
and phase statistics for complete artifact removal in independent
components of neuromagnetic recordings. Biomedical
Engineering, IEEE Transactions on 55 (10), 2353-2362.
"""
idx_ecg = _get_ecg_channel_index(ch_name, inst)
if idx_ecg is None:
ecg, times = _make_ecg(inst, start, stop,
reject_by_annotation=reject_by_annotation)
else:
ecg = inst.ch_names[idx_ecg]
_validate_type(threshold, (str, 'numeric'), 'threshold')
if isinstance(threshold, str):
_check_option('threshold', threshold, ('auto',), extra='when str')
if method == 'ctps':
if threshold == 'auto':
threshold = self._get_ctps_threshold()
logger.info('Using threshold: %.2f for CTPS ECG detection'
% threshold)
if isinstance(inst, BaseRaw):
sources = self.get_sources(create_ecg_epochs(
inst, ch_name, l_freq=l_freq, h_freq=h_freq,
keep_ecg=False,
reject_by_annotation=reject_by_annotation)).get_data()
if sources.shape[0] == 0:
warn('No ECG activity detected. Consider changing '
'the input parameters.')
elif isinstance(inst, BaseEpochs):
sources = self.get_sources(inst).get_data()
else:
raise ValueError('With `ctps` only Raw and Epochs input is '
'supported')
_, p_vals, _ = ctps(sources)
scores = p_vals.max(-1)
ecg_idx = np.where(scores >= threshold)[0]
# sort indices by scores
ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
self.labels_['ecg'] = list(ecg_idx)
if ch_name is None:
ch_name = 'ECG-MAG'
self.labels_['ecg/%s' % ch_name] = list(ecg_idx)
elif method == 'correlation':
if threshold == 'auto':
threshold = 3.0
self.labels_['ecg'], scores = self._find_bads_ch(
inst, [ecg], threshold=threshold, start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq, prefix="ecg",
reject_by_annotation=reject_by_annotation, measure=measure)
else:
raise ValueError('Method "%s" not supported.' % method)
return self.labels_['ecg'], scores
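    # Example workflow (``raw`` and a fitted ``ica`` are assumed):
    #
    #     ecg_idx, scores = ica.find_bads_ecg(raw, method='ctps',
    #                                         threshold='auto')
    #     ica.exclude += ecg_idx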
@verbose
def find_bads_ref(self, inst, ch_name=None, threshold=3.0, start=None,
stop=None, l_freq=None, h_freq=None,
reject_by_annotation=True, method='together',
measure="zscore", verbose=None):
"""Detect MEG reference related components using correlation.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
            Object to compute sources from. Should contain at least one
            channel, i.e., a component derived from MEG reference channels.
ch_name : list of str
Which MEG reference components to use. If None, then all channels
that begin with REF_ICA.
threshold : int | float
The value above which a feature is classified as outlier.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
%(reject_by_annotation_all)s
method : 'together' | 'separate'
Method to use to identify reference channel related components.
Defaults to ``'together'``. See notes.
.. versionadded:: 0.21
measure : 'zscore' | 'correlation'
Which method to use for finding outliers. ``'zscore'`` (default) is
the iterated Z-scoring method, and ``'correlation'`` is an absolute
raw correlation threshold with a range of 0 to 1.
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
ref_idx : list of int
The indices of MEG reference related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg, find_bads_eog
Notes
-----
ICA decomposition on MEG reference channels is used to assess external
magnetic noise and remove it from the MEG. Two methods are supported:
With the "together" method, only one ICA fit is used, which
encompasses both MEG and reference channels together. Components which
have particularly strong weights on the reference channels may be
thresholded and marked for removal.
With "separate," selected components from a separate ICA decomposition
on the reference channels are used as a ground truth for identifying
bad components in an ICA fit done on MEG channels only. The logic here
is similar to an EOG/ECG, with reference components replacing the
EOG/ECG channels. Recommended procedure is to perform ICA separately
on reference channels, extract them using .get_sources(), and then
append them to the inst using :meth:`~mne.io.Raw.add_channels`,
preferably with the prefix ``REF_ICA`` so that they can be
automatically detected.
Thresholding in both cases is based on adaptive z-scoring:
The above-threshold components will be masked and the z-score will be
recomputed until no supra-threshold component remains.
Validation and further documentation for this technique can be found
in :footcite:`HannaEtAl2020`.
.. versionadded:: 0.18
References
----------
.. footbibliography::
"""
if method == "separate":
if not ch_name:
inds = pick_channels_regexp(inst.ch_names, 'REF_ICA*')
else:
inds = pick_channels(inst.ch_names, ch_name)
# regexp returns list, pick_channels returns numpy
inds = list(inds)
if not inds:
raise ValueError('No valid channels available.')
ref_chs = [inst.ch_names[k] for k in inds]
self.labels_['ref_meg'], scores = self._find_bads_ch(
inst, ref_chs, threshold=threshold, start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq, prefix='ref_meg',
reject_by_annotation=reject_by_annotation,
measure=measure)
elif method == 'together':
meg_picks = pick_types(self.info, meg=True, ref_meg=False)
ref_picks = pick_types(self.info, meg=False, ref_meg=True)
if not any(meg_picks) or not any(ref_picks):
                raise ValueError('ICA solution must contain both reference '
                                 'and MEG channels.')
weights = self.get_components()
# take norm of component weights on reference channels for each
# component, divide them by the norm on the standard channels,
# log transform to approximate normal distribution
normrats = np.linalg.norm(weights[ref_picks],
axis=0) / np.linalg.norm(weights[meg_picks], # noqa
axis=0)
scores = np.log(normrats)
self.labels_['ref_meg'] = list(_find_outliers(scores,
threshold=threshold,
tail=1))
else:
raise ValueError('Method "%s" not supported.' % method)
return self.labels_['ref_meg'], scores
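    # Example for the 'together' method (a fitted ``ica`` containing both MEG
    # and reference channels is assumed):
    #
    #     ref_idx, scores = ica.find_bads_ref(raw, method='together',
    #                                         threshold=3.0)
    #     ica.exclude += ref_idx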
@verbose
def find_bads_eog(self, inst, ch_name=None, threshold=3.0, start=None,
stop=None, l_freq=1, h_freq=10,
reject_by_annotation=True, measure='zscore',
verbose=None):
"""Detect EOG related components using correlation.
Detection is based on Pearson correlation between the
filtered data and the filtered EOG channel.
Thresholding is based on adaptive z-scoring. The above threshold
components will be masked and the z-score will be recomputed
until no supra-threshold component remains.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG
channels.
threshold : int | float
The value above which a feature is classified as outlier.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
%(reject_by_annotation_all)s
.. versionadded:: 0.14.0
measure : 'zscore' | 'correlation'
Which method to use for finding outliers. ``'zscore'`` (default) is
the iterated Z-scoring method, and ``'correlation'`` is an absolute
raw correlation threshold with a range of 0 to 1.
.. versionadded:: 0.21
%(verbose_meth)s
Returns
-------
eog_idx : list of int
The indices of EOG related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg, find_bads_ref
"""
eog_inds = _get_eog_channel_index(ch_name, inst)
eog_chs = [inst.ch_names[k] for k in eog_inds]
self.labels_['eog'], scores = self._find_bads_ch(
inst, eog_chs, threshold=threshold, start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq, prefix="eog",
reject_by_annotation=reject_by_annotation, measure=measure)
return self.labels_['eog'], scores
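    # Example (``raw`` containing an EOG channel and a fitted ``ica`` are
    # assumed):
    #
    #     eog_idx, scores = ica.find_bads_eog(raw)
    #     ica.exclude += eog_idx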
@verbose
def apply(self, inst, include=None, exclude=None, n_pca_components=None,
start=None, stop=None, verbose=None):
"""Remove selected components from the signal.
Given the unmixing matrix, transform the data,
zero out all excluded components, and inverse-transform the data.
This procedure will reconstruct M/EEG signals from which
the dynamics described by the excluded components is subtracted.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
The data to be processed (i.e., cleaned). It will be modified
in-place.
include : array_like of int
            The indices referring to columns in the unmixing matrix. The
components to be kept.
exclude : array_like of int
            The indices referring to columns in the unmixing matrix. The
components to be zeroed out.
%(n_pca_components_apply)s
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
%(verbose_meth)s
Returns
-------
out : instance of Raw, Epochs or Evoked
The processed data.
Notes
-----
.. note:: Applying ICA may introduce a DC shift. If you pass
baseline-corrected `~mne.Epochs` or `~mne.Evoked` data,
the baseline period of the cleaned data may not be of
zero mean anymore. If you require baseline-corrected
data, apply baseline correction again after cleaning
via ICA. A warning will be emitted to remind you of this
fact if you pass baseline-corrected data.
.. versionchanged:: 0.23
Warn if instance was baseline-corrected.
"""
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst',
'Raw, Epochs, or Evoked')
kwargs = dict(include=include, exclude=exclude,
n_pca_components=n_pca_components)
if isinstance(inst, BaseRaw):
kind, meth = 'Raw', self._apply_raw
kwargs.update(raw=inst, start=start, stop=stop)
elif isinstance(inst, BaseEpochs):
kind, meth = 'Epochs', self._apply_epochs
kwargs.update(epochs=inst)
else: # isinstance(inst, Evoked):
kind, meth = 'Evoked', self._apply_evoked
kwargs.update(evoked=inst)
_check_compensation_grade(self.info, inst.info, 'ICA', kind,
ch_names=self.ch_names)
if isinstance(inst, (BaseEpochs, Evoked)):
if getattr(inst, 'baseline', None) is not None:
warn('The data you passed to ICA.apply() was '
'baseline-corrected. Please note that ICA can introduce '
'DC shifts, therefore you may wish to consider '
'baseline-correcting the cleaned data again.')
logger.info(f'Applying ICA to {kind} instance')
return meth(**kwargs)
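    # Example (``raw`` is assumed to be preloaded; components listed in
    # ``ica.exclude`` are removed from the copy in place):
    #
    #     reconst_raw = raw.copy()
    #     ica.apply(reconst_raw)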
def _check_exclude(self, exclude):
if exclude is None:
return list(set(self.exclude))
else:
# Allow both self.exclude and exclude to be array-like:
return list(set(self.exclude).union(set(exclude)))
def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):
"""Aux method."""
_check_preload(raw, "ica.apply")
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads', ref_meg=False)
data = raw[picks, start:stop][0]
data = self._pick_sources(data, include, exclude, n_pca_components)
raw[picks, start:stop] = data
return raw
def _apply_epochs(self, epochs, include, exclude, n_pca_components):
"""Aux method."""
_check_preload(epochs, "ica.apply")
picks = pick_types(epochs.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data = np.hstack(epochs.get_data(picks))
data = self._pick_sources(data, include, exclude, n_pca_components)
# restore epochs, channels, tsl order
epochs._data[:, picks] = np.array(
np.split(data, len(epochs.events), 1))
epochs.preload = True
return epochs
def _apply_evoked(self, evoked, include, exclude, n_pca_components):
"""Aux method."""
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with ica.ch_names' % (len(self.ch_names),
len(picks)))
data = evoked.data[picks]
data = self._pick_sources(data, include, exclude, n_pca_components)
# restore evoked
evoked.data[picks] = data
return evoked
def _pick_sources(self, data, include, exclude, n_pca_components):
"""Aux function."""
if n_pca_components is None:
n_pca_components = self.n_pca_components
data = self._pre_whiten(data)
exclude = self._check_exclude(exclude)
_n_pca_comp = self._check_n_pca_components(n_pca_components)
n_ch, _ = data.shape
max_pca_components = self.pca_components_.shape[0]
if not self.n_components_ <= _n_pca_comp <= max_pca_components:
raise ValueError(
f'n_pca_components ({_n_pca_comp}) must be >= '
f'n_components_ ({self.n_components_}) and <= '
'the total number of PCA components '
f'({max_pca_components}).')
logger.info(f' Transforming to ICA space ({self.n_components_} '
f'component{_pl(self.n_components_)})')
# Apply first PCA
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
sel_keep = np.arange(self.n_components_)
if include not in (None, []):
sel_keep = np.unique(include)
elif exclude not in (None, []):
sel_keep = np.setdiff1d(np.arange(self.n_components_), exclude)
n_zero = self.n_components_ - len(sel_keep)
logger.info(f' Zeroing out {n_zero} ICA component{_pl(n_zero)}')
        # Mixing and unmixing should both be shape (self.n_components_,) * 2,
# and we need to put these into the upper left part of larger mixing
# and unmixing matrices of shape (n_ch, _n_pca_comp)
pca_components = self.pca_components_[:_n_pca_comp]
assert pca_components.shape == (_n_pca_comp, n_ch)
assert self.unmixing_matrix_.shape == \
self.mixing_matrix_.shape == \
(self.n_components_,) * 2
unmixing = np.eye(_n_pca_comp)
unmixing[:self.n_components_, :self.n_components_] = \
self.unmixing_matrix_
unmixing = np.dot(unmixing, pca_components)
logger.info(f' Projecting back using {_n_pca_comp} '
f'PCA component{_pl(_n_pca_comp)}')
mixing = np.eye(_n_pca_comp)
mixing[:self.n_components_, :self.n_components_] = \
self.mixing_matrix_
mixing = pca_components.T @ mixing
assert mixing.shape == unmixing.shape[::-1] == (n_ch, _n_pca_comp)
# keep requested components plus residuals (if any)
sel_keep = np.concatenate(
(sel_keep, np.arange(self.n_components_, _n_pca_comp)))
proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])
data = np.dot(proj_mat, data)
assert proj_mat.shape == (n_ch,) * 2
if self.pca_mean_ is not None:
data += self.pca_mean_[:, None]
# restore scaling
if self.noise_cov is None: # revert standardization
data *= self.pre_whitener_
else:
data = np.linalg.pinv(self.pre_whitener_, rcond=1e-14) @ data
return data
@verbose
def save(self, fname, verbose=None):
"""Store ICA solution into a fiff file.
Parameters
----------
fname : str
The absolute path of the file name to save the ICA solution into.
The file name should end with -ica.fif or -ica.fif.gz.
%(verbose_meth)s
Returns
-------
ica : instance of ICA
The object.
See Also
--------
read_ica
"""
if self.current_fit == 'unfitted':
raise RuntimeError('No fit available. Please first fit ICA')
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
'_ica.fif', '_ica.fif.gz'))
logger.info('Writing ICA solution to %s...' % fname)
fid = start_file(fname)
try:
_write_ica(fid, self)
end_file(fid)
except Exception:
end_file(fid)
os.remove(fname)
raise
return self
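    # Round-trip sketch (the file name is a placeholder; it must end in
    # -ica.fif or _ica.fif per check_fname above):
    #
    #     ica.save('sample-ica.fif')
    #     ica_restored = read_ica('sample-ica.fif')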
def copy(self):
"""Copy the ICA object.
Returns
-------
ica : instance of ICA
The copied object.
"""
return deepcopy(self)
@copy_function_doc_to_method_doc(plot_ica_components)
def plot_components(self, picks=None, ch_type=None, res=64,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=False, title=None, show=True, outlines='head',
contours=6, image_interp='bilinear',
inst=None, plot_std=True, topomap_args=None,
image_args=None, psd_args=None, reject='auto',
sphere=None, verbose=None):
return plot_ica_components(self, picks=picks, ch_type=ch_type,
res=res, vmin=vmin,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title, show=show,
outlines=outlines, contours=contours,
image_interp=image_interp,
inst=inst, plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
reject=reject, sphere=sphere,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_ica_properties)
def plot_properties(self, inst, picks=None, axes=None, dB=True,
plot_std=True, topomap_args=None, image_args=None,
psd_args=None, figsize=None, show=True, reject='auto',
reject_by_annotation=True, *, verbose=None):
return plot_ica_properties(self, inst, picks=picks, axes=axes,
dB=dB, plot_std=plot_std,
topomap_args=topomap_args,
image_args=image_args, psd_args=psd_args,
figsize=figsize, show=show, reject=reject,
reject_by_annotation=reject_by_annotation,
verbose=verbose)
@copy_function_doc_to_method_doc(plot_ica_sources)
def plot_sources(self, inst, picks=None, start=None,
stop=None, title=None, show=True, block=False,
show_first_samp=False, show_scrollbars=True):
return plot_ica_sources(self, inst=inst, picks=picks,
start=start, stop=stop, title=title, show=show,
block=block, show_first_samp=show_first_samp,
show_scrollbars=show_scrollbars)
@copy_function_doc_to_method_doc(plot_ica_scores)
def plot_scores(self, scores, exclude=None, labels=None, axhline=None,
title='ICA component scores', figsize=None, n_cols=None,
show=True):
return plot_ica_scores(
ica=self, scores=scores, exclude=exclude, labels=labels,
axhline=axhline, title=title, figsize=figsize, n_cols=n_cols,
show=show)
@copy_function_doc_to_method_doc(plot_ica_overlay)
def plot_overlay(self, inst, exclude=None, picks=None, start=None,
stop=None, title=None, show=True, n_pca_components=None):
return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
start=start, stop=stop, title=title, show=show,
n_pca_components=n_pca_components)
def detect_artifacts(self, raw, start_find=None, stop_find=None,
ecg_ch=None, ecg_score_func='pearsonr',
ecg_criterion=0.1, eog_ch=None,
eog_score_func='pearsonr',
eog_criterion=0.1, skew_criterion=0,
kurt_criterion=0, var_criterion=-1,
add_nodes=None):
"""Run ICA artifacts detection workflow.
Note. This is still experimental and will most likely change over
the next releases. For maximum control use the workflow exposed in
the examples.
Hints and caveats:
- It is highly recommended to bandpass filter ECG and EOG
data and pass them instead of the channel names as ecg_ch and eog_ch
arguments.
        - Please check your results. Detection by kurtosis and variance
may be powerful but misclassification of brain signals as
noise cannot be precluded.
- Consider using shorter times for start_find and stop_find than
for start and stop. It can save you much time.
Example invocation (taking advantage of the defaults)::
            ica.detect_artifacts(ecg_ch='MEG 1531', eog_ch='EOG 061')
Parameters
----------
raw : instance of Raw
Raw object to draw sources from. No components are actually removed
here, i.e. ica is not applied to raw in this function. Use
`ica.apply() <ICA.apply>` for this after inspection of the
identified components.
start_find : int | float | None
First sample to include for artifact search. If float, data will be
interpreted as time in seconds. If None, data will be used from the
first sample.
stop_find : int | float | None
Last sample to not include for artifact search. If float, data will
be interpreted as time in seconds. If None, data will be used to
the last sample.
ecg_ch : str | ndarray | None
The ``target`` argument passed to ica.find_sources_raw. Either the
name of the ECG channel or the ECG time series. If None, this step
will be skipped.
ecg_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
ecg_criterion : float | int | list-like | slice
The indices of the sorted ecg scores. If float, sources with
absolute scores greater than the criterion will be dropped. Else,
the absolute scores sorted in descending order will be indexed
accordingly. E.g. range(2) would return the two sources with the
highest absolute score. If None, this step will be skipped.
eog_ch : list | str | ndarray | None
The ``target`` argument or the list of target arguments
subsequently passed to ica.find_sources_raw. Either the name of the
vertical EOG channel or the corresponding EOG time series. If None,
this step will be skipped.
eog_score_func : str | callable
The ``score_func`` argument passed to ica.find_sources_raw. Either
the name of function supported by ICA or a custom function.
eog_criterion : float | int | list-like | slice
The indices of the sorted eog scores. If float, sources with
absolute scores greater than the criterion will be dropped. Else,
the absolute scores sorted in descending order will be indexed
accordingly. E.g. range(2) would return the two sources with the
highest absolute score. If None, this step will be skipped.
skew_criterion : float | int | list-like | slice
The indices of the sorted skewness scores. If float, sources with
absolute scores greater than the criterion will be dropped. Else,
the absolute scores sorted in descending order will be indexed
accordingly. E.g. range(2) would return the two sources with the
highest absolute score. If None, this step will be skipped.
kurt_criterion : float | int | list-like | slice
The indices of the sorted kurtosis scores. If float, sources with
absolute scores greater than the criterion will be dropped. Else,
the absolute scores sorted in descending order will be indexed
accordingly. E.g. range(2) would return the two sources with the
highest absolute score. If None, this step will be skipped.
var_criterion : float | int | list-like | slice
The indices of the sorted variance scores. If float, sources with
absolute scores greater than the criterion will be dropped. Else,
the absolute scores sorted in descending order will be indexed
accordingly. E.g. range(2) would return the two sources with the
highest absolute score. If None, this step will be skipped.
add_nodes : list of tuple
            Additional list of tuples carrying the following parameters
of ica nodes:
(name : str, target : str | array, score_func : callable,
criterion : float | int | list-like | slice). This parameter is a
generalization of the artifact specific parameters above and has
the same structure. Example::
                add_nodes=('ECG phase lock', 'ECG 01',
my_phase_lock_function, 0.5)
Returns
-------
self : instance of ICA
The ICA object with the detected artifact indices marked for
exclusion.
"""
logger.info(' Searching for artifacts...')
_detect_artifacts(self, raw=raw, start_find=start_find,
stop_find=stop_find, ecg_ch=ecg_ch,
ecg_score_func=ecg_score_func,
ecg_criterion=ecg_criterion,
eog_ch=eog_ch, eog_score_func=eog_score_func,
eog_criterion=eog_criterion,
skew_criterion=skew_criterion,
kurt_criterion=kurt_criterion,
var_criterion=var_criterion,
add_nodes=add_nodes)
return self
@verbose
def _check_n_pca_components(self, _n_pca_comp, verbose=None):
"""Aux function."""
if isinstance(_n_pca_comp, float):
n, ev = _exp_var_ncomp(
self.pca_explained_variance_, _n_pca_comp)
logger.info(f' Selected {n} PCA components by explained '
f'variance ({100 * ev}≥{100 * _n_pca_comp}%)')
_n_pca_comp = n
elif _n_pca_comp is None:
_n_pca_comp = self._max_pca_components
if _n_pca_comp is None:
_n_pca_comp = self.pca_components_.shape[0]
elif _n_pca_comp < self.n_components_:
_n_pca_comp = self.n_components_
return _n_pca_comp
def _exp_var_ncomp(var, n):
cvar = np.asarray(var, dtype=np.float64)
cvar = cvar.cumsum()
cvar /= cvar[-1]
# We allow 1., which would give us N+1
n = min((cvar <= n).sum() + 1, len(cvar))
return n, cvar[n - 1]
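# Worked example for _exp_var_ncomp: with explained-variance ratios
# [0.6, 0.3, 0.1] the normalized cumulative sum is [0.6, 0.9, 1.0], so a
# threshold of 0.85 returns 2 components covering ~90% of the variance.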
def _check_start_stop(raw, start, stop):
"""Aux function."""
out = list()
for st, none_ in ((start, 0), (stop, raw.n_times)):
if st is None:
out.append(none_)
else:
try:
out.append(_ensure_int(st))
except TypeError: # not int-like
out.append(raw.time_as_index(st)[0])
return out
@verbose
def ica_find_ecg_events(raw, ecg_source, event_id=999,
tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
verbose=None):
"""Find ECG peaks from one selected ICA source.
Parameters
----------
raw : instance of Raw
Raw object to draw sources from.
ecg_source : ndarray
ICA source resembling ECG to find peaks from.
event_id : int
The index to assign to found events.
tstart : float
Start detection after tstart seconds. Useful when beginning
of run is noisy.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
qrs_threshold : float | str
        QRS detection threshold between 0 and 1. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
%(verbose)s
Returns
-------
ecg_events : array
Events.
"""
logger.info('Using ICA source to identify heart beats')
# detecting QRS and generating event file
ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
tstart=tstart, thresh_value=qrs_threshold,
l_freq=l_freq, h_freq=h_freq)
n_events = len(ecg_events)
ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
event_id * np.ones(n_events)]
return ecg_events
@verbose
def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
h_freq=10, verbose=None):
"""Locate EOG artifacts from one selected ICA source.
Parameters
----------
raw : instance of Raw
The raw data.
eog_source : ndarray
ICA source resembling EOG to find peaks from.
event_id : int
The index to assign to found events.
l_freq : float
Low cut-off frequency in Hz.
h_freq : float
High cut-off frequency in Hz.
%(verbose)s
Returns
-------
eog_events : array
Events.
"""
eog_events = _find_eog_events(eog_source[np.newaxis], event_id=event_id,
l_freq=l_freq, h_freq=h_freq,
sampling_rate=raw.info['sfreq'],
first_samp=raw.first_samp)
return eog_events
def _get_target_ch(container, target):
"""Aux function."""
# auto target selection
picks = pick_channels(container.ch_names, include=[target])
ref_picks = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
if len(ref_picks) > 0:
picks = list(set(picks) - set(ref_picks))
if len(picks) == 0:
raise ValueError('%s not in channel list (%s)' %
(target, container.ch_names))
return picks
def _find_sources(sources, target, score_func):
"""Aux function."""
if isinstance(score_func, str):
score_func = get_score_funcs().get(score_func, score_func)
if not callable(score_func):
raise ValueError('%s is not a valid score_func.' % score_func)
scores = (score_func(sources, target) if target is not None
else score_func(sources, 1))
return scores
def _ica_explained_variance(ica, inst, normalize=False):
"""Check variance accounted for by each component in supplied data.
Parameters
----------
ica : ICA
Instance of `mne.preprocessing.ICA`.
inst : Raw | Epochs | Evoked
Data to explain with ICA. Instance of Raw, Epochs or Evoked.
normalize : bool
Whether to normalize the variance.
Returns
-------
var : array
Variance explained by each component.
"""
# check if ica is ICA and whether inst is Raw or Epochs
if not isinstance(ica, ICA):
raise TypeError('first argument must be an instance of ICA.')
if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
raise TypeError('second argument must an instance of either Raw, '
'Epochs or Evoked.')
source_data = _get_inst_data(ica.get_sources(inst))
# if epochs - reshape to channels x timesamples
if isinstance(inst, BaseEpochs):
n_epochs, n_chan, n_samp = source_data.shape
source_data = source_data.transpose(1, 0, 2).reshape(
(n_chan, n_epochs * n_samp))
n_chan, n_samp = source_data.shape
var = np.sum(ica.mixing_matrix_ ** 2, axis=0) * np.sum(
source_data ** 2, axis=1) / (n_chan * n_samp - 1)
if normalize:
var /= var.sum()
return var
def _sort_components(ica, order, copy=True):
"""Change the order of components in ica solution."""
assert ica.n_components_ == len(order)
if copy:
ica = ica.copy()
# reorder components
ica.mixing_matrix_ = ica.mixing_matrix_[:, order]
ica.unmixing_matrix_ = ica.unmixing_matrix_[order, :]
# reorder labels, excludes etc.
if isinstance(order, np.ndarray):
order = list(order)
if ica.exclude:
ica.exclude = [order.index(ic) for ic in ica.exclude]
for k in ica.labels_.keys():
ica.labels_[k] = [order.index(ic) for ic in ica.labels_[k]]
return ica
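# Illustrative sketch, not from the original module: how a permutation
# ``order`` reorders component axes and remaps stored component indices,
# mirroring _sort_components on plain numpy arrays (toy values only).
def _example_sort_components_sketch():
    import numpy as np
    mixing = np.arange(8.).reshape(2, 4)            # channels x components
    order = [3, 1, 0, 2]                            # new component order
    mixing = mixing[:, order]                       # like mixing_matrix_[:, order]
    exclude = [0, 3]                                # old indices of bad components
    exclude = [order.index(ic) for ic in exclude]   # new positions -> [2, 0]
    return mixing, exclude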
def _serialize(dict_, outer_sep=';', inner_sep=':'):
"""Aux function."""
s = []
for key, value in dict_.items():
if callable(value):
value = value.__name__
elif isinstance(value, Integral):
value = int(value)
elif isinstance(value, dict):
# py35 json does not support numpy int64
for subkey, subvalue in value.items():
if isinstance(subvalue, list):
if len(subvalue) > 0:
if isinstance(subvalue[0], (int, np.integer)):
value[subkey] = [int(i) for i in subvalue]
for cls in (np.random.RandomState, Covariance):
if isinstance(value, cls):
value = cls.__name__
s.append(key + inner_sep + json.dumps(value))
return outer_sep.join(s)
def _deserialize(str_, outer_sep=';', inner_sep=':'):
"""Aux Function."""
out = {}
for mapping in str_.split(outer_sep):
k, v = mapping.split(inner_sep, 1)
out[k] = json.loads(v)
return out
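# Illustrative sketch, not from the original module: the flat
# 'key:json;key:json' format produced by _serialize and parsed back by
# _deserialize (toy parameters only).
def _example_serialize_roundtrip_sketch():
    params = dict(n_components=15, method='fastica')
    encoded = _serialize(params)     # 'n_components:15;method:"fastica"'
    assert _deserialize(encoded) == params
    return encoded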
def _write_ica(fid, ica):
"""Write an ICA object.
Parameters
----------
    fid : file
        The file descriptor.
    ica : instance of ICA
        The instance of ICA to write.
"""
ica_init = dict(noise_cov=ica.noise_cov,
n_components=ica.n_components,
n_pca_components=ica.n_pca_components,
max_pca_components=ica._max_pca_components,
current_fit=ica.current_fit,
allow_ref_meg=ica.allow_ref_meg)
if ica.info is not None:
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if ica.info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])
# Write measurement info
write_meas_info(fid, ica.info)
end_block(fid, FIFF.FIFFB_MEAS)
start_block(fid, FIFF.FIFFB_MNE_ICA)
# ICA interface params
write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
_serialize(ica_init))
# Channel names
if ica.ch_names is not None:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
# samples on fit
n_samples = getattr(ica, 'n_samples_', None)
ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),
'labels_': getattr(ica, 'labels_', None),
'method': getattr(ica, 'method', None),
'n_iter_': getattr(ica, 'n_iter_', None),
'fit_params': getattr(ica, 'fit_params', None)}
# ICA misc params
write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
_serialize(ica_misc))
# Whitener
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica.pre_whitener_)
# PCA components_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
ica.pca_components_)
# PCA mean_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
# PCA explained_variance_
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
ica.pca_explained_variance_)
# ICA unmixing
write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
# Write bad components
write_int(fid, FIFF.FIFF_MNE_ICA_BADS, list(ica.exclude))
# Done!
end_block(fid, FIFF.FIFFB_MNE_ICA)
@verbose
def read_ica(fname, verbose=None):
"""Restore ICA solution from fif file.
Parameters
----------
fname : str
Absolute path to fif file containing ICA matrices.
The file name should end with -ica.fif or -ica.fif.gz.
%(verbose)s
Returns
-------
ica : instance of ICA
The ICA estimator.
"""
check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
'_ica.fif', '_ica.fif.gz'))
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
try:
# we used to store bads that weren't part of the info...
info, _ = read_meas_info(fid, tree, clean_bads=True)
except ValueError:
logger.info('Could not find the measurement info. \n'
'Functionality requiring the info won\'t be'
' available.')
info = None
ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)
if len(ica_data) == 0:
ica_data = dir_tree_find(tree, 123) # Constant 123 Used before v 0.11
if len(ica_data) == 0:
fid.close()
raise ValueError('Could not find ICA data')
my_ica_data = ica_data[0]
for d in my_ica_data['directory']:
kind = d.kind
pos = d.pos
if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
tag = read_tag(fid, pos)
ica_init = tag.data
elif kind == FIFF.FIFF_MNE_ROW_NAMES:
tag = read_tag(fid, pos)
ch_names = tag.data
elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
tag = read_tag(fid, pos)
pre_whitener = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
tag = read_tag(fid, pos)
pca_components = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
tag = read_tag(fid, pos)
pca_explained_variance = tag.data
elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
tag = read_tag(fid, pos)
pca_mean = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
tag = read_tag(fid, pos)
unmixing_matrix = tag.data
elif kind == FIFF.FIFF_MNE_ICA_BADS:
tag = read_tag(fid, pos)
exclude = tag.data
elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
tag = read_tag(fid, pos)
ica_misc = tag.data
fid.close()
ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
n_pca_components = ica_init.pop('n_pca_components')
current_fit = ica_init.pop('current_fit')
max_pca_components = ica_init.pop('max_pca_components')
method = ica_misc.get('method', 'fastica')
if method in _KNOWN_ICA_METHODS:
ica_init['method'] = method
if ica_init['noise_cov'] == Covariance.__name__:
logger.info('Reading whitener drawn from noise covariance ...')
logger.info('Now restoring ICA solution ...')
# make sure dtypes are np.float64 to satisfy fast_dot
def f(x):
return x.astype(np.float64)
ica_init = {k: v for k, v in ica_init.items()
if k in _get_args(ICA.__init__)}
ica = ICA(**ica_init)
ica.current_fit = current_fit
ica.ch_names = ch_names.split(':')
if n_pca_components is not None and \
not isinstance(n_pca_components, int_like):
n_pca_components = np.float64(n_pca_components)
ica.n_pca_components = n_pca_components
ica.pre_whitener_ = f(pre_whitener)
ica.pca_mean_ = f(pca_mean)
ica.pca_components_ = f(pca_components)
ica.n_components_ = unmixing_matrix.shape[0]
ica._max_pca_components = max_pca_components
ica._update_ica_names()
ica.pca_explained_variance_ = f(pca_explained_variance)
ica.unmixing_matrix_ = f(unmixing_matrix)
ica._update_mixing_matrix()
ica.exclude = [] if exclude is None else list(exclude)
ica.info = info
if 'n_samples_' in ica_misc:
ica.n_samples_ = ica_misc['n_samples_']
if 'labels_' in ica_misc:
labels_ = ica_misc['labels_']
if labels_ is not None:
ica.labels_ = labels_
if 'method' in ica_misc:
ica.method = ica_misc['method']
if 'n_iter_' in ica_misc:
ica.n_iter_ = ica_misc['n_iter_']
if 'fit_params' in ica_misc:
ica.fit_params = ica_misc['fit_params']
logger.info('Ready.')
return ica
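# Illustrative sketch, not from the original module: typical use of read_ica;
# the file name below is hypothetical.
def _example_read_ica_usage_sketch():
    ica = read_ica('subject_01-ica.fif')     # hypothetical -ica.fif file
    return ica.n_components_, ica.exclude    # restored exactly as saved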
_ica_node = namedtuple('Node', 'name target score_func criterion')
def _detect_artifacts(ica, raw, start_find, stop_find, ecg_ch, ecg_score_func,
ecg_criterion, eog_ch, eog_score_func, eog_criterion,
skew_criterion, kurt_criterion, var_criterion,
add_nodes):
"""Aux Function."""
from scipy import stats
nodes = []
if ecg_ch is not None:
nodes += [_ica_node('ECG', ecg_ch, ecg_score_func, ecg_criterion)]
if eog_ch not in [None, []]:
if not isinstance(eog_ch, list):
eog_ch = [eog_ch]
for idx, ch in enumerate(eog_ch):
nodes += [_ica_node('EOG %02d' % idx, ch, eog_score_func,
eog_criterion)]
if skew_criterion is not None:
nodes += [_ica_node('skewness', None, stats.skew, skew_criterion)]
if kurt_criterion is not None:
nodes += [_ica_node('kurtosis', None, stats.kurtosis, kurt_criterion)]
if var_criterion is not None:
nodes += [_ica_node('variance', None, np.var, var_criterion)]
if add_nodes is not None:
nodes.extend(add_nodes)
for node in nodes:
scores = ica.score_sources(raw, start=start_find, stop=stop_find,
target=node.target,
score_func=node.score_func)
if isinstance(node.criterion, float):
found = list(np.where(np.abs(scores) > node.criterion)[0])
else:
# Sort in descending order; use (-abs()), rather than [::-1] to
# keep any NaN values in the end (and also keep the order of same
# values):
found = list(np.atleast_1d((-np.abs(scores)).argsort()
[node.criterion]))
case = (len(found), _pl(found), node.name)
logger.info(' found %s artifact%s by %s' % case)
ica.exclude = list(ica.exclude) + found
logger.info('Artifact indices found:\n ' + str(ica.exclude).strip('[]'))
if len(set(ica.exclude)) != len(ica.exclude):
logger.info(' Removing duplicate indices...')
ica.exclude = list(set(ica.exclude))
logger.info('Ready.')
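# Illustrative sketch, not from the original module: why _detect_artifacts
# sorts with (-np.abs(scores)).argsort() instead of reversing -- NaNs stay at
# the end and an integer/slice criterion then picks the strongest scores.
def _example_criterion_sort_sketch():
    import numpy as np
    scores = np.array([0.1, -0.9, np.nan, 0.5])
    ranked = (-np.abs(scores)).argsort()              # array([1, 3, 0, 2])
    top_two = list(np.atleast_1d(ranked[slice(0, 2)]))
    return top_two                                    # [1, 3]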
@verbose
def _band_pass_filter(inst, sources, target, l_freq, h_freq, verbose=None):
"""Optionally band-pass filter the data."""
if l_freq is not None and h_freq is not None:
logger.info('... filtering ICA sources')
# use FIR here, steeper is better
kw = dict(phase='zero-double', filter_length='10s', fir_window='hann',
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
fir_design='firwin2')
sources = filter_data(sources, inst.info['sfreq'], l_freq, h_freq,
**kw)
logger.info('... filtering target')
target = filter_data(target, inst.info['sfreq'], l_freq, h_freq, **kw)
elif l_freq is not None or h_freq is not None:
raise ValueError('Must specify both pass bands')
return sources, target
# #############################################################################
# CORRMAP
def _find_max_corrs(all_maps, target, threshold):
"""Compute correlations between template and target components."""
all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
abs_corrs = [np.abs(a) for a in all_corrs]
corr_polarities = [np.sign(a) for a in all_corrs]
if threshold <= 1:
max_corrs = [list(np.nonzero(s_corr > threshold)[0])
for s_corr in abs_corrs]
else:
max_corrs = [list(_find_outliers(s_corr, threshold=threshold))
for s_corr in abs_corrs]
am = [l_[i] for l_, i_s in zip(abs_corrs, max_corrs)
for i in i_s]
median_corr_with_target = np.median(am) if len(am) > 0 else 0
polarities = [l_[i] for l_, i_s in zip(corr_polarities, max_corrs)
for i in i_s]
maxmaps = [l_[i] for l_, i_s in zip(all_maps, max_corrs)
for i in i_s]
if len(maxmaps) == 0:
return [], 0, 0, []
newtarget = np.zeros(maxmaps[0].size)
std_of_maps = np.std(np.asarray(maxmaps))
    mean_of_maps = np.mean(np.asarray(maxmaps))
for maxmap, polarity in zip(maxmaps, polarities):
newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
newtarget /= len(maxmaps)
newtarget *= std_of_maps
sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
return newtarget, median_corr_with_target, sim_i_o, max_corrs
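# Illustrative sketch, not from the original module: the selection/averaging
# step above on toy data -- keep maps whose |r| with the template exceeds the
# threshold and average them with their sign so anti-correlated maps add up.
# corrmap() below runs this twice, the second time with the averaged map.
def _example_corrmap_pass_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    target = rng.randn(10)
    maps = rng.randn(5, 10)
    maps[3] = -target + 0.05 * rng.randn(10)         # anti-correlated map
    corrs = np.array([np.corrcoef(target, m)[0, 1] for m in maps])
    keep = np.nonzero(np.abs(corrs) > 0.9)[0]        # the "threshold <= 1" branch
    new_target = (maps[keep] * np.sign(corrs[keep])[:, None]).mean(axis=0)
    return keep, new_target                          # new_target ~ target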
@verbose
def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg",
plot=True, show=True, outlines='head',
sensors=True, contours=6, cmap=None, sphere=None, verbose=None):
"""Find similar Independent Components across subjects by map similarity.
Corrmap (Viola et al. 2009 Clin Neurophysiol) identifies the best group
match to a supplied template. Typically, feed it a list of fitted ICAs and
a template IC, for example, the blink for the first subject, to identify
specific ICs across subjects.
The specific procedure consists of two iterations. In a first step, the
maps best correlating with the template are identified. In the next step,
the analysis is repeated with the mean of the maps identified in the first
stage.
    Run with ``plot`` and ``show`` set to ``True`` and ``label=None`` to find
    good parameters. Then, run with ``label`` set to a name under which the
    indices of the matching ICs are stored in each ICA object's ``labels_``
    dictionary. (Running with ``plot=False`` and ``label=None`` does nothing.)
The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html
Parameters
----------
icas : list of mne.preprocessing.ICA
A list of fitted ICA objects.
template : tuple | np.ndarray, shape (n_components,)
Either a tuple with two elements (int, int) representing the list
indices of the set from which the template should be chosen, and the
template. E.g., if template=(1, 0), the first IC of the 2nd ICA object
is used.
Or a numpy array whose size corresponds to each IC map from the
supplied maps, in which case this map is chosen as the template.
threshold : "auto" | list of float | float
        Correlation threshold for identifying ICs.
        If "auto", search for the best map by trying all correlations between
        0.6 and 0.95. In the original proposal, lower values are considered,
        but this is not yet implemented.
        If a list of floats, search for the best map in the specified range of
        correlation strengths; as correlation values, these must be between
        0 and 1.
        If a float > 0, select ICs correlating better than this value.
        If a float > 1, use z-scoring to identify ICs within subjects (not in
        the original Corrmap).
        Defaults to "auto".
label : None | str
If not None, categorised ICs are stored in a dictionary ``labels_``
under the given name. Preexisting entries will be appended to
(excluding repeats), not overwritten. If None, a dry run is performed
and the supplied ICs are not changed.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
The channel type to plot. Defaults to 'eeg'.
plot : bool
Should constructed template and selected maps be plotted? Defaults
to True.
show : bool
Show figures if True.
%(topomap_outlines)s
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
contours : int | array of float
The number of contour lines to draw. If 0, no contours will be drawn.
When an integer, matplotlib ticker locator is used to find suitable
values for the contour thresholds (may sometimes be inaccurate, use
array for accuracy). If an array, the values represent the levels for
the contours. Defaults to 6.
cmap : None | matplotlib colormap
Colormap for the plot. If ``None``, defaults to 'Reds_r' for norm data,
otherwise to 'RdBu_r'.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
template_fig : Figure
Figure showing the template.
labelled_ics : Figure
Figure showing the labelled ICs in all ICA decompositions.
"""
if not isinstance(plot, bool):
raise ValueError("`plot` must be of type `bool`")
same_chans = _check_all_same_channel_names(icas)
if same_chans is False:
raise ValueError("Not all ICA instances have the same channel names. "
"Corrmap requires all instances to have the same "
"montage. Consider interpolating bad channels before "
"running ICA.")
threshold_extra = ''
if threshold == 'auto':
threshold = np.arange(60, 95, dtype=np.float64) / 100.
threshold_extra = ' ("auto")'
all_maps = [ica.get_components().T for ica in icas]
# check if template is an index to one IC in one ICA object, or an array
if len(template) == 2:
target = all_maps[template[0]][template[1]]
is_subject = True
elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:
target = template
is_subject = False
else:
raise ValueError("`template` must be a length-2 tuple or an array the "
"size of the ICA maps.")
template_fig, labelled_ics = None, None
if plot is True:
if is_subject: # plotting from an ICA object
ttl = 'Template from subj. {}'.format(str(template[0]))
template_fig = icas[template[0]].plot_components(
picks=template[1], ch_type=ch_type, title=ttl,
outlines=outlines, cmap=cmap, contours=contours,
show=show, topomap_args=dict(sphere=sphere))
else: # plotting an array
template_fig = _plot_corrmap([template], [0], [0], ch_type,
icas[0].copy(), "Template",
outlines=outlines, cmap=cmap,
contours=contours,
show=show, template=True,
sphere=sphere)
template_fig.subplots_adjust(top=0.8)
template_fig.canvas.draw()
# first run: use user-selected map
threshold = np.atleast_1d(np.array(threshold, float)).ravel()
    threshold_err = ('No component detected using z-scoring threshold%s %s, '
                     'consider using a more lenient threshold'
                     % (threshold_extra, threshold))
if len(all_maps) == 0:
raise RuntimeError(threshold_err)
paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
# find iteration with highest avg correlation with target
new_target, _, _, _ = paths[np.argmax([path[2] for path in paths])]
# second run: use output from first run
if len(all_maps) == 0 or len(new_target) == 0:
raise RuntimeError(threshold_err)
paths = [_find_max_corrs(all_maps, new_target, t) for t in threshold]
del new_target
# find iteration with highest avg correlation with target
_, median_corr, _, max_corrs = paths[
np.argmax([path[1] for path in paths])]
allmaps, indices, subjs, nones = [list() for _ in range(4)]
logger.info('Median correlation with constructed map: %0.3f' % median_corr)
del median_corr
if plot is True:
logger.info('Displaying selected ICs per subject.')
for ii, (ica, max_corr) in enumerate(zip(icas, max_corrs)):
if len(max_corr) > 0:
if isinstance(max_corr[0], np.ndarray):
max_corr = max_corr[0]
if label is not None:
ica.labels_[label] = list(set(list(max_corr) +
ica.labels_.get(label, list())))
if plot is True:
allmaps.extend(ica.get_components()[:, max_corr].T)
subjs.extend([ii] * len(max_corr))
indices.extend(max_corr)
else:
if (label is not None) and (label not in ica.labels_):
ica.labels_[label] = list()
nones.append(ii)
if len(nones) == 0:
logger.info('At least 1 IC detected for each subject.')
else:
logger.info('No maps selected for subject%s %s, '
'consider a more liberal threshold.'
% (_pl(nones), nones))
if plot is True:
labelled_ics = _plot_corrmap(allmaps, subjs, indices, ch_type, ica,
label, outlines=outlines, cmap=cmap,
contours=contours,
show=show, sphere=sphere)
return template_fig, labelled_ics
else:
return None
@verbose
def read_ica_eeglab(fname, *, verbose=None):
"""Load ICA information saved in an EEGLAB .set file.
Parameters
----------
fname : str
Complete path to a .set EEGLAB file that contains an ICA object.
%(verbose)s
Returns
-------
ica : instance of ICA
An ICA object based on the information contained in the input file.
"""
from scipy import linalg
eeg = _check_load_mat(fname, None)
info, eeg_montage, _ = _get_info(eeg)
info.set_montage(eeg_montage)
pick_info(info, np.round(eeg['icachansind']).astype(int) - 1, copy=False)
rank = eeg.icasphere.shape[0]
n_components = eeg.icaweights.shape[0]
ica = ICA(method='imported_eeglab', n_components=n_components)
ica.current_fit = "eeglab"
ica.ch_names = info["ch_names"]
ica.n_pca_components = None
ica.n_components_ = n_components
n_ch = len(ica.ch_names)
assert len(eeg.icachansind) == n_ch
ica.pre_whitener_ = np.ones((n_ch, 1))
ica.pca_mean_ = np.zeros(n_ch)
assert eeg.icasphere.shape[1] == n_ch
assert eeg.icaweights.shape == (n_components, rank)
# When PCA reduction is used in EEGLAB, runica returns
# weights= weights*sphere*eigenvectors(:,1:ncomps)';
# sphere = eye(urchans). When PCA reduction is not used, we have:
#
# eeg.icawinv == pinv(eeg.icaweights @ eeg.icasphere)
#
# So in either case, we can use SVD to get our square whitened
# weights matrix (u * s) and our PCA vectors (v) back:
use = eeg.icaweights @ eeg.icasphere
use_check = linalg.pinv(eeg.icawinv)
if not np.allclose(use, use_check, rtol=1e-6):
warn('Mismatch between icawinv and icaweights @ icasphere from EEGLAB '
'possibly due to ICA component removal, assuming icawinv is '
'correct')
use = use_check
u, s, v = _safe_svd(use, full_matrices=False)
ica.unmixing_matrix_ = u * s
ica.pca_components_ = v
ica.pca_explained_variance_ = s * s
ica.info = info
ica._update_mixing_matrix()
ica._update_ica_names()
return ica
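# Illustrative sketch, not from the original module: the identities relied on
# above -- EEGLAB stores icawinv = pinv(icaweights @ icasphere), and the SVD
# of icaweights @ icasphere yields the whitened unmixing matrix (u * s) and
# the PCA vectors (v). Toy matrices only.
def _example_eeglab_svd_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    weights = rng.randn(4, 4)                 # stands in for eeg.icaweights
    sphere = np.eye(4)                        # stands in for eeg.icasphere
    use = weights @ sphere
    icawinv = np.linalg.pinv(use)             # what EEGLAB would store
    assert np.allclose(np.linalg.pinv(icawinv), use)
    u, s, v = np.linalg.svd(use, full_matrices=False)
    assert np.allclose((u * s) @ v, use)      # unmixing_matrix_ @ pca_components_
    return u * s, v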
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/frame/test_to_csv.py | 1 | 45178 | # -*- coding: utf-8 -*-
from __future__ import print_function
import csv
import pytest
from numpy import nan
import numpy as np
from pandas.compat import (lmap, range, lrange, StringIO, u)
from pandas.core.common import _all_none
from pandas.errors import ParserError
from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp,
date_range, read_csv, compat, to_datetime)
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
ensure_clean,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameToCSV(TestData):
def read_csv(self, path, **kwargs):
params = dict(index_col=0, parse_dates=True)
params.update(**kwargs)
return pd.read_csv(path, **params)
def test_from_csv_deprecation(self):
# see gh-17812
with ensure_clean('__tmp_from_csv_deprecation__') as path:
self.tsframe.to_csv(path)
with tm.assert_produces_warning(FutureWarning):
depr_recons = DataFrame.from_csv(path)
assert_frame_equal(self.tsframe, depr_recons)
def test_to_csv_from_csv1(self):
with ensure_clean('__tmp_to_csv_from_csv1__') as path:
self.frame['A'][:5] = nan
self.frame.to_csv(path)
self.frame.to_csv(path, columns=['A', 'B'])
self.frame.to_csv(path, header=False)
self.frame.to_csv(path, index=False)
# test roundtrip
self.tsframe.to_csv(path)
recons = self.read_csv(path)
assert_frame_equal(self.tsframe, recons)
self.tsframe.to_csv(path, index_label='index')
recons = self.read_csv(path, index_col=None)
assert(len(recons.columns) == len(self.tsframe.columns) + 1)
# no index
self.tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
assert_almost_equal(self.tsframe.values, recons.values)
# corner case
dm = DataFrame({'s1': Series(lrange(3), lrange(3)),
's2': Series(lrange(2), lrange(2))})
dm.to_csv(path)
recons = self.read_csv(path)
assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self):
with ensure_clean('__tmp_to_csv_from_csv2__') as path:
# duplicate index
df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],
columns=['x', 'y', 'z'])
df.to_csv(path)
result = self.read_csv(path)
assert_frame_equal(result, df)
midx = MultiIndex.from_tuples(
[('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx,
columns=['x', 'y', 'z'])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2],
parse_dates=False)
assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = self.frame2.copy()
xp.columns = col_aliases
assert_frame_equal(xp, rs)
pytest.raises(ValueError, self.frame2.to_csv, path,
header=['AA', 'X'])
def test_to_csv_from_csv3(self):
with ensure_clean('__tmp_to_csv_from_csv3__') as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode='a', header=False)
xp = pd.concat([df1, df2])
rs = pd.read_csv(path, index_col=0)
rs.columns = lmap(int, rs.columns)
xp.columns = lmap(int, xp.columns)
assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with ensure_clean('__tmp_to_csv_from_csv4__') as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = pd.DataFrame({'dt_data': [i * dt for i in range(3)]},
index=pd.Index([i * dt for i in range(3)],
name='dt_index'))
df.to_csv(path)
result = pd.read_csv(path, index_col='dt_index')
result.index = pd.to_timedelta(result.index)
# TODO: remove renaming when GH 10875 is solved
result.index = result.index.rename('dt_index')
result['dt_data'] = pd.to_timedelta(result['dt_data'])
assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self):
# tz, 8260
with ensure_clean('__tmp_to_csv_from_csv5__') as path:
self.tzframe.to_csv(path)
result = pd.read_csv(path, index_col=0, parse_dates=['A'])
converter = lambda c: to_datetime(result[c]).dt.tz_localize(
'UTC').dt.tz_convert(self.tzframe[c].dt.tz)
result['B'] = converter('B')
result['C'] = converter('C')
assert_frame_equal(result, self.tzframe)
def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
chunksize = 5
N = int(chunksize * 2.5)
df = mkdf(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
import pandas as pd
def _check_df(df, cols=None):
with ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = pd.read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(
cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
assert_series_equal(obj_df, obj_rs)
else:
assert_frame_equal(
obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
_check_df(df, None)
# dupe cols with selection
cols = ['b', 'a']
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
from pandas import NaT
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range('2000', freq='5min', periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
# s3=make_dtnjat_arr(chunksize+5,0)
with ensure_clean('1.csv') as pth:
df = DataFrame(dict(a=s1, b=s2))
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth)._convert(datetime=True,
coerce=True)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(df, r_dtype=None, c_dtype=None,
rnlvl=None, cnlvl=None, dupe_col=False):
kwargs = dict(parse_dates=False)
if cnlvl:
if rnlvl is not None:
kwargs['index_col'] = lrange(rnlvl)
kwargs['header'] = lrange(cnlvl)
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8',
chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs['header'] = 0
with ensure_clean('__tmp_to_csv_moar__') as path:
df.to_csv(path, encoding='utf8', chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, compat.text_type):
return x.decode('utf8')
return x
if dupe_col:
                # read_csv disambiguates the columns by
                # labeling them dupe.1, dupe.2, etc. Monkey-patch the columns
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[
:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1:]
type_map = dict(i='i', f='f', s='O', u='O', dt='O', p='O')
if r_dtype:
if r_dtype == 'u': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(_to_uni, recons.index),
dtype=r_dtype)
df.index = np.array(lmap(_to_uni, df.index), dtype=r_dtype)
elif r_dtype == 'dt': # unicode
r_dtype = 'O'
recons.index = np.array(lmap(Timestamp, recons.index),
dtype=r_dtype)
df.index = np.array(
lmap(Timestamp, df.index), dtype=r_dtype)
elif r_dtype == 'p':
r_dtype = 'O'
recons.index = np.array(
list(map(Timestamp, to_datetime(recons.index))),
dtype=r_dtype)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())),
dtype=r_dtype)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == 'u':
c_dtype = 'O'
recons.columns = np.array(lmap(_to_uni, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(_to_uni, df.columns), dtype=c_dtype)
elif c_dtype == 'dt':
c_dtype = 'O'
recons.columns = np.array(lmap(Timestamp, recons.columns),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns), dtype=c_dtype)
elif c_dtype == 'p':
c_dtype = 'O'
recons.columns = np.array(
lmap(Timestamp, to_datetime(recons.columns)),
dtype=c_dtype)
df.columns = np.array(
lmap(Timestamp, df.columns.to_timestamp()),
dtype=c_dtype)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)
N = 100
chunksize = 1000
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
for ncols in [4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type='dt',
c_idx_type='s'), 'dt', 's')
pass
for r_idx_type, c_idx_type in [('i', 'i'), ('s', 's'), ('u', 'dt'),
('p', 'p')]:
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [2, 10, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_type=r_idx_type,
c_idx_type=c_idx_type),
r_idx_type, c_idx_type)
for ncols in [1, 2, 3, 4]:
base = int((chunksize // ncols or 1) or 1)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = mkdf(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=lrange(10)))
_do_test(mkdf(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2, 2 * N - 2,
2 * N - 1, 2 * N, 2 * N + 1, 2 * N + 2,
base - 1, base, base + 1]:
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(mkdf(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(mkdf(nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2),
rnlvl=2, cnlvl=2)
def test_to_csv_from_csv_w_some_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['G'] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < .5]
self.frame['H'] = self.frame.index.map(f)
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_from_csv_w_all_infs(self):
# test roundtrip with inf, -inf, nan, as full columns and mix
self.frame['E'] = np.inf
self.frame['F'] = -np.inf
with ensure_clean() as path:
self.frame.to_csv(path)
recons = self.read_csv(path)
# TODO to_csv drops column name
assert_frame_equal(self.frame, recons, check_names=False)
assert_frame_equal(np.isinf(self.frame),
np.isinf(recons), check_names=False)
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with ensure_clean('__tmp_to_csv_no_index__') as path:
df = DataFrame({'c1': [1, 2, 3], 'c2': [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
df['c3'] = Series([7, 8, 9], dtype='int64')
df.to_csv(path, index=False)
result = read_csv(path)
assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ['a', 'b', 'c'],
1: ['aa', 'bb', 'cc']})
df['test'] = 'txt'
assert df.to_csv() == df.to_csv(columns=[0, 1, 'test'])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])
with ensure_clean('__tmp_to_csv_headers__') as path:
from_df.to_csv(path, header=['X', 'Y'])
recons = self.read_csv(path)
assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=['X', 'Y'])
recons = self.read_csv(path)
recons.reset_index(inplace=True)
assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self):
frame = self.frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])
frame.index = new_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=['A', 'B'])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1],
parse_dates=False)
# TODO to_csv drops column name
assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
self.frame.index = old_index
# try multiindex with dates
tsframe = self.tsframe
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=['time', 'foo'])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
assert_frame_equal(tsframe, recons, check_names=False)
# do not load index
tsframe.to_csv(path)
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(tsframe.columns) + 2
# no index
tsframe.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
assert_almost_equal(recons.values, self.tsframe.values)
# needed if setUp becomes class method
self.tsframe.index = old_index
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# GH3571, GH1651, GH3141
def _make_frame(names=None):
if names is True:
names = ['first', 'second']
return DataFrame(np.random.randint(0, 10, size=(3, 3)),
columns=MultiIndex.from_tuples(
[('bah', 'foo'),
('bah', 'bar'),
('ban', 'baz')], names=names),
dtype='int64')
# column & index are multi-index
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1])
assert_frame_equal(df, result)
# column is mi
df = mkdf(5, 3, r_idx_nlevels=1, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(
path, header=[0, 1, 2, 3], index_col=0)
assert_frame_equal(df, result)
# dup column names?
df = mkdf(5, 3, r_idx_nlevels=3, c_idx_nlevels=4)
df.to_csv(path)
result = read_csv(path, header=[0, 1, 2, 3],
index_col=[0, 1, 2])
assert_frame_equal(df, result)
# writing with no index
df = _make_frame()
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert_frame_equal(df, result)
# we lose the names here
df = _make_frame(True)
df.to_csv(path, index=False)
result = read_csv(path, header=[0, 1])
assert _all_none(*result.columns.names)
result.columns.names = df.columns.names
assert_frame_equal(df, result)
# tupleize_cols=True and index=False
df = _make_frame(True)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True, index=False)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0,
tupleize_cols=True,
index_col=None)
result.columns = df.columns
assert_frame_equal(df, result)
# whatsnew example
df = _make_frame()
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
df = _make_frame(True)
df.to_csv(path)
result = read_csv(path, header=[0, 1],
index_col=[0])
assert_frame_equal(df, result)
# column & index are multi-index (compatibility)
df = mkdf(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
with tm.assert_produces_warning(FutureWarning):
df.to_csv(path, tupleize_cols=True)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = read_csv(path, header=0, index_col=[0, 1],
tupleize_cols=True)
result.columns = df.columns
assert_frame_equal(df, result)
# invalid options
df = _make_frame(True)
df.to_csv(path)
for i in [6, 7]:
msg = 'len of {i}, but only 5 lines in file'.format(i=i)
with tm.assert_raises_regex(ParserError, msg):
read_csv(path, header=lrange(i), index_col=0)
# write with cols
with tm.assert_raises_regex(TypeError, 'cannot specify cols '
'with a MultiIndex'):
df.to_csv(path, columns=['foo', 'bar'])
with ensure_clean('__tmp_to_csv_multiindex__') as path:
# empty
tsframe[:0].to_csv(path)
recons = self.read_csv(path)
exp = tsframe[:0]
exp.index = []
tm.assert_index_equal(recons.columns, exp.columns)
assert len(recons) == 0
def test_to_csv_float32_nanrep(self):
df = DataFrame(np.random.randn(1, 4).astype(np.float32))
df[1] = np.nan
with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:
df.to_csv(path, na_rep=999)
with open(path) as f:
lines = f.readlines()
assert lines[1].split(',')[2] == '999'
def test_to_csv_withcommas(self):
# Commas inside fields should be correctly escaped when saving as CSV.
df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})
with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:
df.to_csv(path)
df2 = self.read_csv(path)
assert_frame_equal(df2, df)
def test_to_csv_mixed(self):
def create_cols(name):
return ["%s%03d" % (name, i) for i in range(5)]
df_float = DataFrame(np.random.randn(
100, 5), dtype='float64', columns=create_cols('float'))
df_int = DataFrame(np.random.randn(100, 5),
dtype='int64', columns=create_cols('int'))
df_bool = DataFrame(True, index=df_float.index,
columns=create_cols('bool'))
df_object = DataFrame('foo', index=df_float.index,
columns=create_cols('object'))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=create_cols('date'))
# add in some nans
df_float.loc[30:50, 1:3] = np.nan
# ## this is a bug in read_csv right now ####
# df_dt.loc[30:50,1:3] = np.nan
df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1)
# dtype
dtypes = dict()
for n, dtype in [('float', np.float64), ('int', np.int64),
('bool', np.bool), ('object', np.object)]:
for c in create_cols(n):
dtypes[c] = dtype
with ensure_clean() as filename:
df.to_csv(filename)
rs = read_csv(filename, index_col=0, dtype=dtypes,
parse_dates=create_cols('date'))
assert_frame_equal(rs, df)
def test_to_csv_dups_cols(self):
df = DataFrame(np.random.randn(1000, 30), columns=lrange(
15) + lrange(15), dtype='float64')
with ensure_clean() as filename:
df.to_csv(filename) # single dtype, fine
result = read_csv(filename, index_col=0)
result.columns = df.columns
assert_frame_equal(result, df)
df_float = DataFrame(np.random.randn(1000, 3), dtype='float64')
df_int = DataFrame(np.random.randn(1000, 3), dtype='int64')
df_bool = DataFrame(True, index=df_float.index, columns=lrange(3))
df_object = DataFrame('foo', index=df_float.index, columns=lrange(3))
df_dt = DataFrame(Timestamp('20010101'),
index=df_float.index, columns=lrange(3))
df = pd.concat([df_float, df_int, df_bool, df_object,
df_dt], axis=1, ignore_index=True)
cols = []
for i in range(5):
cols.extend([0, 1, 2])
df.columns = cols
with ensure_clean() as filename:
df.to_csv(filename)
result = read_csv(filename, index_col=0)
# date cols
for i in ['0.4', '1.4', '2.4']:
result[i] = to_datetime(result[i])
result.columns = df.columns
assert_frame_equal(result, df)
# GH3457
from pandas.util.testing import makeCustomDataframe as mkdf
N = 10
df = mkdf(N, 3)
df.columns = ['a', 'a', 'b']
with ensure_clean() as filename:
df.to_csv(filename)
# read_csv will rename the dups columns
result = read_csv(filename, index_col=0)
result = result.rename(columns={'a.1': 'a'})
assert_frame_equal(result, df)
def test_to_csv_chunking(self):
aa = DataFrame({'A': lrange(100000)})
aa['B'] = aa.A + 1.0
aa['C'] = aa.A + 2.0
aa['D'] = aa.A + 3.0
for chunksize in [10000, 50000, 100000]:
with ensure_clean() as filename:
aa.to_csv(filename, chunksize=chunksize)
rs = read_csv(filename, index_col=0)
assert_frame_equal(rs, aa)
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
with ensure_clean() as filename:
df.to_csv(filename, header=False, index=False)
rs = read_csv(filename, header=None)
assert_frame_equal(rs, df)
def test_to_csv_bug(self):
f1 = StringIO('a,1.0\nb,2.0')
df = self.read_csv(f1, header=None)
newdf = DataFrame({'t': df[df.columns[0]]})
with ensure_clean() as path:
newdf.to_csv(path)
recons = read_csv(path, index_col=0)
# don't check_names as t != 1
assert_frame_equal(recons, newdf, check_names=False)
def test_to_csv_unicode(self):
df = DataFrame({u('c/\u03c3'): [1, 2, 3]})
with ensure_clean() as path:
df.to_csv(path, encoding='UTF-8')
df2 = read_csv(path, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
df.to_csv(path, encoding='UTF-8', index=False)
df2 = read_csv(path, index_col=None, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_unicode_index_col(self):
buf = StringIO('')
df = DataFrame(
[[u("\u05d0"), "d2", "d3", "d4"], ["a1", "a2", "a3", "a4"]],
columns=[u("\u05d0"),
u("\u05d1"), u("\u05d2"), u("\u05d3")],
index=[u("\u05d0"), u("\u05d1")])
df.to_csv(buf, encoding='UTF-8')
buf.seek(0)
df2 = read_csv(buf, index_col=0, encoding='UTF-8')
assert_frame_equal(df, df2)
def test_to_csv_stringio(self):
buf = StringIO()
self.frame.to_csv(buf)
buf.seek(0)
recons = read_csv(buf, index_col=0)
# TODO to_csv drops column name
assert_frame_equal(recons, self.frame, check_names=False)
def test_to_csv_float_format(self):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, float_format='%.2f')
rs = read_csv(filename, index_col=0)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
assert_frame_equal(rs, xp)
def test_to_csv_unicodewriter_quoting(self):
df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})
buf = StringIO()
df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,
encoding='utf-8')
result = buf.getvalue()
expected = ('"A","B"\n'
'1,"foo"\n'
'2,"bar"\n'
'3,"baz"\n')
assert result == expected
def test_to_csv_quote_none(self):
# GH4328
df = DataFrame({'A': ['hello', '{"hello"}']})
for encoding in (None, 'utf-8'):
buf = StringIO()
df.to_csv(buf, quoting=csv.QUOTE_NONE,
encoding=encoding, index=False)
result = buf.getvalue()
expected = 'A\nhello\n{"hello"}\n'
assert result == expected
def test_to_csv_index_no_leading_comma(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, index_label=False)
expected = ('A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_line_terminators(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
index=['one', 'two', 'three'])
buf = StringIO()
df.to_csv(buf, line_terminator='\r\n')
expected = (',A,B\r\n'
'one,1,4\r\n'
'two,2,5\r\n'
'three,3,6\r\n')
assert buf.getvalue() == expected
buf = StringIO()
df.to_csv(buf) # The default line terminator remains \n
expected = (',A,B\n'
'one,1,4\n'
'two,2,5\n'
'three,3,6\n')
assert buf.getvalue() == expected
def test_to_csv_from_csv_categorical(self):
# CSV with categoricals should result in the same output as when one
# would add a "normal" Series/DataFrame.
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']))
s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
res = StringIO()
s.to_csv(res)
exp = StringIO()
s2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
df = DataFrame({"s": s})
df2 = DataFrame({"s": s2})
res = StringIO()
df.to_csv(res)
exp = StringIO()
df2.to_csv(exp)
assert res.getvalue() == exp.getvalue()
def test_to_csv_path_is_none(self):
# GH 8215
# Make sure we return string for consistency with
# Series.to_csv()
csv_str = self.frame.to_csv(path_or_buf=None)
assert isinstance(csv_str, str)
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
def test_to_csv_compression_gzip(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="gzip")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="gzip", index_col=0)
assert_frame_equal(df, rs)
            # explicitly make sure file is gzipped
import gzip
f = gzip.open(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
def test_to_csv_compression_bz2(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="bz2")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="bz2", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is bz2ed
import bz2
f = bz2.BZ2File(filename, 'rb')
text = f.read().decode('utf8')
f.close()
for col in df.columns:
assert col in text
@td.skip_if_no_lzma
def test_to_csv_compression_xz(self):
# GH11852
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
df.to_csv(filename, compression="xz")
# test the round trip - to_csv -> read_csv
rs = read_csv(filename, compression="xz", index_col=0)
assert_frame_equal(df, rs)
# explicitly make sure file is xzipped
lzma = compat.import_lzma()
f = lzma.open(filename, 'rb')
assert_frame_equal(df, read_csv(f, index_col=0))
f.close()
def test_to_csv_compression_value_error(self):
# GH7615
# use the compression kw in to_csv
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with ensure_clean() as filename:
            # zip compression is not supported and raises zipfile.BadZipfile
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_csv,
filename, compression="zip")
def test_to_csv_date_format(self):
with ensure_clean('__tmp_to_csv_date_format__') as path:
dt_index = self.tsframe.index
datetime_frame = DataFrame(
{'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
datetime_frame.to_csv(path, date_format='%Y%m%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_int = datetime_frame.applymap(
lambda x: int(x.strftime('%Y%m%d')))
datetime_frame_int.index = datetime_frame_int.index.map(
lambda x: int(x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_int)
datetime_frame.to_csv(path, date_format='%Y-%m-%d')
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(
lambda x: x.strftime('%Y-%m-%d'))
datetime_frame_str.index = datetime_frame_str.index.map(
lambda x: x.strftime('%Y-%m-%d'))
assert_frame_equal(test, datetime_frame_str)
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
datetime_frame_columns.to_csv(path, date_format='%Y%m%d')
test = read_csv(path, index_col=0)
datetime_frame_columns = datetime_frame_columns.applymap(
lambda x: int(x.strftime('%Y%m%d')))
# Columns don't get converted to ints by read_csv
datetime_frame_columns.columns = (
datetime_frame_columns.columns
.map(lambda x: x.strftime('%Y%m%d')))
assert_frame_equal(test, datetime_frame_columns)
# test NaTs
nat_index = to_datetime(
['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
nat_frame.to_csv(path, date_format='%Y-%m-%d')
test = read_csv(path, parse_dates=[0, 1], index_col=0)
assert_frame_equal(test, nat_frame)
def test_to_csv_with_dst_transitions(self):
with ensure_clean('csv_date_format_with_dst') as path:
# make sure we are not failing on transitions
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10s')]:
time_range = np.array(range(len(i)), dtype='int64')
df = DataFrame({'A': time_range}, index=i)
df.to_csv(path, index=True)
# we have to reconvert the index as we
# don't parse the tz's
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/London')
assert_frame_equal(result, df)
# GH11619
idx = pd.date_range('2015-01-01', '2015-12-31',
freq='H', tz='Europe/Paris')
df = DataFrame({'values': 1, 'idx': idx},
index=idx)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_csv(path, index=True)
result = read_csv(path, index_col=0)
result.index = to_datetime(result.index).tz_localize(
'UTC').tz_convert('Europe/Paris')
result['idx'] = to_datetime(result['idx']).astype(
'datetime64[ns, Europe/Paris]')
assert_frame_equal(result, df)
# assert working
df.astype(str)
with ensure_clean('csv_date_format_with_dst') as path:
df.to_pickle(path)
result = pd.read_pickle(path)
assert_frame_equal(result, df)
def test_to_csv_quoting(self):
df = DataFrame({
'c_string': ['a', 'b,c'],
'c_int': [42, np.nan],
'c_float': [1.0, 3.2],
'c_bool': [True, False],
})
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,"b,c"
"""
result = df.to_csv()
assert result == expected
result = df.to_csv(quoting=None)
assert result == expected
result = df.to_csv(quoting=csv.QUOTE_MINIMAL)
assert result == expected
expected = """\
"","c_bool","c_float","c_int","c_string"
"0","True","1.0","42.0","a"
"1","False","3.2","","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_ALL)
assert result == expected
# see gh-12922, gh-13259: make sure changes to
# the formatters do not break this behaviour
expected = """\
"","c_bool","c_float","c_int","c_string"
0,True,1.0,42.0,"a"
1,False,3.2,"","b,c"
"""
result = df.to_csv(quoting=csv.QUOTE_NONNUMERIC)
assert result == expected
msg = "need to escape, but no escapechar set"
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE)
tm.assert_raises_regex(csv.Error, msg, df.to_csv,
quoting=csv.QUOTE_NONE,
escapechar=None)
expected = """\
,c_bool,c_float,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,b!,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='!')
assert result == expected
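        # with escapechar='f' and QUOTE_NONE, the escape character itself is
        # escaped too, so 'c_float' becomes 'c_ffloat' and the comma in 'b,c'
        # becomes 'bf,c' in the expected output below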
expected = """\
,c_bool,c_ffloat,c_int,c_string
0,True,1.0,42.0,a
1,False,3.2,,bf,c
"""
result = df.to_csv(quoting=csv.QUOTE_NONE,
escapechar='f')
assert result == expected
# see gh-3503: quoting Windows line terminators
# presents with encoding?
text = 'a,b,c\n1,"test \r\n",3\n'
df = pd.read_csv(StringIO(text))
buf = StringIO()
df.to_csv(buf, encoding='utf-8', index=False)
assert buf.getvalue() == text
# xref gh-7791: make sure the quoting parameter is passed through
# with multi-indexes
df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
df = df.set_index(['a', 'b'])
expected = '"a","b","c"\n"1","3","5"\n"2","4","6"\n'
assert df.to_csv(quoting=csv.QUOTE_ALL) == expected
def test_period_index_date_overflow(self):
# see gh-15982
dates = ["1990-01-01", "2000-01-01", "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n2000-01-01,5\n3005-01-01,6\n'
assert result == expected
date_format = "%m-%d-%Y"
result = df.to_csv(date_format=date_format)
expected = ',0\n01-01-1990,4\n01-01-2000,5\n01-01-3005,6\n'
assert result == expected
# Overflow with pd.NaT
dates = ["1990-01-01", pd.NaT, "3005-01-01"]
index = pd.PeriodIndex(dates, freq="D")
df = pd.DataFrame([4, 5, 6], index=index)
result = df.to_csv()
expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n'
assert result == expected
def test_multi_index_header(self):
# see gh-5539
columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2),
("b", 1), ("b", 2)])
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]])
df.columns = columns
header = ["a", "b", "c", "d"]
result = df.to_csv(header=header)
expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n"
assert result == expected
| bsd-3-clause |
zguangyu/rts2 | python/rts2/focusing.py | 1 | 10592 | #!/usr/bin/python
#
# Autofocosing routines.
#
# You will need: scipy matplotlib sextractor
# This should work on Debian/ubuntu:
# sudo apt-get install python-matplotlib python-scipy python-pyfits sextractor
#
# If you would like to see sextractor results, get DS9 and pyds9:
#
# http://hea-www.harvard.edu/saord/ds9/
#
# Please be aware that current sextractor Ubuntu packages do not work
# properly. The best workaround is to install the package, and then overwrite
# the sextractor binary with one compiled from sources (so you will have
# access to the sextractor configuration files, which the program assumes).
#
# (C) 2002-2008 Stanislav Vitek
# (C) 2002-2010 Martin Jelinek
# (C) 2009-2010 Markus Wildi
# (C) 2010-2014 Petr Kubanek, Institute of Physics <[email protected]>
# (C) 2010 Francisco Forster Buron, Universidad de Chile
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import scriptcomm
import sextractor
sepPresent = False
try:
import sep
sepPresent = True
except Exception,ex:
pass
from pylab import *
from scipy import *
from scipy import optimize
import numpy
LINEAR = 0
"""Linear fit"""
P2 = 1
"""Fit using 2 power polynomial"""
P4 = 2
"""Fit using 4 power polynomial"""
H3 = 3
"""Fit using general Hyperbola (three free parameters)"""
H2 = 4
"""Fit using Hyperbola with fixed slope at infinity (two free parameters)"""
class Focusing (scriptcomm.Rts2Comm):
"""Take and process focussing data."""
def __init__(self,exptime = 2,step=1,attempts=20,filterGalaxies=False):
scriptcomm.Rts2Comm.__init__(self)
        self.exptime = exptime
self.focuser = self.getValue('focuser')
try:
self.getValueFloat('AF_step',self.focuser)
except NameError:
self.step = 1 # 0.2
else:
self.step = self.getValueFloat('AF_step',self.focuser)
try:
self.getValueFloat('AF_attempts',self.focuser)
except NameError:
self.attempts = 20 #30 # 20
else:
self.attempts = self.getValueInteger('AF_attempts',self.focuser)
# if |offset| is above this value, try linear fit
self.linear_fit = self.step * self.attempts / 2.0
# target FWHM for linear fit
self.linear_fit_fwhm = 3.5
self.filterGalaxies = filterGalaxies
def doFit(self,fit):
b = None
errfunc = None
fitfunc_r = None
p0 = None
# try to fit..
# this function is for flux..
#fitfunc = lambda p, x: p[0] * p[4] / (p[4] + p[3] * (abs(x - p[1])) ** (p[2]))
# prepare fit based on its type..
if fit == LINEAR:
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y: fitfunc(p, x) - y # LINEAR - distance to the target function
p0 = [1, 1]
fitfunc_r = lambda x, p0, p1: p0 + p1 * x
elif fit == P2:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2)
errfunc = lambda p, x, y: fitfunc(p, x) - y # P2 - distance to the target function
p0 = [1, 1, 1]
fitfunc_r = lambda x, p0, p1, p2 : p0 + p1 * x + p2 * (x ** 2)
elif fit == P4:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4)
errfunc = lambda p, x, y: fitfunc(p, x) - y # P4 - distance to the target function
p0 = [1, 1, 1, 1, 1]
            fitfunc_r = lambda x, p0, p1, p2, p3, p4: p0 + p1 * x + p2 * (x ** 2) + p3 * (x ** 3) + p4 * (x ** 4)
elif fit == H3:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2])**2)
errfunc = lambda p, x, y: fitfunc(p, x) - y # H3 - distance to the target function
p0 = [400., 3.46407715307, self.fwhm_MinimumX] # initial guess based on real data
fitfunc_r = lambda x, p0, p1, p2 : sqrt(p0 ** 2 + p1 ** 2 * (x - p2) ** 2)
elif fit == H2:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1])**2) # 3.46 based on H3 fits
errfunc = lambda p, x, y: fitfunc(p, x) - y # H2 - distance to the target function
p0 = [400., self.fwhm_MinimumX] # initial guess based on real data
fitfunc_r = lambda x, p0, p1 : sqrt(p0 ** 2 + 3.46407715307 ** 2 * (x - p1) ** 2)
else:
            raise Exception('Unknown fit type {0}'.format(fit))
self.fwhm_poly, success = optimize.leastsq(errfunc, p0[:], args=(self.focpos, self.fwhm))
b = None
if fit == LINEAR:
b = (self.linear_fit_fwhm - self.fwhm_poly[0]) / self.fwhm_poly[1]
elif fit == H3:
b = self.fwhm_poly[2]
self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
self.log('I', 'found slope at infinity: {0}'.format(abs(self.fwhm_poly[1])))
elif fit == H2:
b = self.fwhm_poly[1]
self.log('I', 'found minimum FWHM: {0}'.format(abs(self.fwhm_poly[0])))
else:
b = optimize.fmin(fitfunc_r,self.fwhm_MinimumX,args=(self.fwhm_poly), disp=0)[0]
self.log('I', 'found FWHM minimum at offset {0}'.format(b))
return b
def tryFit(self,defaultFit):
"""Try fit, change to linear fit if outside allowed range."""
b = self.doFit(defaultFit)
if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
			self.log('W','cannot find best FWHM inside limits, trying H2 fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
b = self.doFit(H2)
if (abs(b - numpy.average(self.focpos)) >= self.linear_fit):
				self.log('W','cannot find best FWHM inside limits, trying linear fit - best fit is {0}, average focuser position is {1}'.format(b, numpy.average(self.focpos)))
b = self.doFit(LINEAR)
return b,LINEAR
return b,H2
return b,defaultFit
def doFitOnArrays(self,fwhm,focpos,defaultFit):
self.fwhm = array(fwhm)
self.focpos = array(focpos)
self.fwhm_MinimumX = 0
min_fwhm=fwhm[0]
for x in range(0,len(fwhm)):
if fwhm[x] < min_fwhm:
self.fwhm_MinimumX = x
min_fwhm = fwhm[x]
return self.tryFit(defaultFit)
	def __sexFindFWHM(self,tries,threshold,deblendmin,min_stars=95):
		# X is FWHM, Y is offset value
		focpos=[]
		fwhm=[]
		fwhm_min=None
		fwhm_MinimumX=None
		keys = tries.keys()
		keys.sort()
		sextr = sextractor.Sextractor(threshold=threshold,deblendmin=deblendmin)
		for k in keys:
			try:
				sextr.runSExtractor(tries[k])
				im_fwhm,fwhms,nstars = sextr.calculate_FWHM(min_stars,self.filterGalaxies)
			except Exception, ex:
				self.log('W','offset {0}: {1}'.format(k,ex))
				continue
			self.log('I','offset {0} fwhm {1} with {2} stars'.format(k,im_fwhm,nstars))
			focpos.append(k)
			fwhm.append(im_fwhm)
			if (fwhm_min is None or im_fwhm < fwhm_min):
				fwhm_MinimumX = k
				fwhm_min = im_fwhm
		return focpos,fwhm,fwhm_min,fwhm_MinimumX
def __sepFindFWHM(self,tries):
from astropy.io import fits
import math
import traceback
focpos=[]
fwhm=[]
fwhm_min=None
fwhm_MinimumX=None
keys = tries.keys()
keys.sort()
ln2=math.log(2)
for k in keys:
try:
fwhms=[]
ff=fits.open(tries[k])
# loop on images..
for i in range(1,len(ff)):
data=ff[i].data
bkg=sep.Background(numpy.array(data,numpy.float))
sources=sep.extract(data-bkg, 5.0 * bkg.globalrms)
for s in sources:
fwhms.append(2 * math.sqrt(ln2 * (s[12]**2 + s[13]**2)))
im_fwhm=numpy.median(fwhms)
# find median from fwhms measurements..
self.log('I','offset {0} fwhm {1} with {2} stars'.format(k,im_fwhm,len(fwhms)))
focpos.append(k)
fwhm.append(im_fwhm)
if (fwhm_min is None or im_fwhm < fwhm_min):
fwhm_MinimumX = k
fwhm_min = im_fwhm
except Exception,ex:
self.log('W','offset {0}: {1} {2}'.format(k,ex,traceback.format_exc()))
return focpos,fwhm,fwhm_min,fwhm_MinimumX
def findBestFWHM(self,tries,defaultFit=H3,min_stars=15,ds9display=False,threshold=2.7,deblendmin=0.03):
# X is FWHM, Y is offset value
self.focpos=[]
self.fwhm=[]
self.fwhm_min = None
self.fwhm_MinimumX = None
if sepPresent:
self.focpos,self.fwhm,self.fwhm_min,self.fwhm_MinimumX = self.__sepFindFWHM(tries)
else:
			self.focpos,self.fwhm,self.fwhm_min,self.fwhm_MinimumX = self.__sexFindFWHM(tries,threshold,deblendmin,min_stars)
self.focpos = array(self.focpos)
self.fwhm = array(self.fwhm)
return self.tryFit(defaultFit)
def beforeReadout(self):
self.current_focus = self.getValueFloat('FOC_POS',self.focuser)
if (self.num == self.attempts):
self.setValue('FOC_TOFF',0,self.focuser)
else:
self.off += self.step
self.setValue('FOC_TOFF',self.off,self.focuser)
def takeImages(self):
self.setValue('exposure',self.exptime)
self.setValue('SHUTTER','LIGHT')
self.off = -1 * self.step * (self.attempts / 2)
self.setValue('FOC_TOFF',self.off,self.focuser)
tries = {}
# must be overwritten in beforeReadout
self.current_focus = None
for self.num in range(1,self.attempts+1):
self.log('I','starting {0}s exposure on offset {1}'.format(self.exptime,self.off))
img = self.exposure(self.beforeReadout,'%b/focusing/%N/%o/%f')
tries[self.current_focus] = img
self.log('I','all focusing exposures finished, processing data')
return self.findBestFWHM(tries)
def run(self):
self.focuser = self.getValue('focuser')
		# send the telescope to some other coordinates if you wish, or disable this call for targets with fixed coordinates
self.altaz (89,90)
b,fit = self.takeImages()
if fit == LINEAR:
self.setValue('FOC_DEF',b,self.focuser)
b,fit = self.takeImages()
self.setValue('FOC_DEF',b,self.focuser)
def plotFit(self,b,ftype):
"""Plot fit graph."""
fitfunc = None
if ftype == LINEAR:
fitfunc = lambda p, x: p[0] + p[1] * x
elif ftype == P2:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2)
elif ftype == P4:
fitfunc = lambda p, x: p[0] + p[1] * x + p[2] * (x ** 2) + p[3] * (x ** 3) + p[4] * (x ** 4)
elif ftype == H3:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + p[1] ** 2 * (x - p[2]) ** 2)
elif ftype == H2:
fitfunc = lambda p, x: sqrt(p[0] ** 2 + 3.46407715307 ** 2 * (x - p[1]) ** 2) # 3.46 based on HYPERBOLA fits
else:
			raise Exception('Unknown fit type {0}'.format(ftype))
x = linspace(self.focpos.min() - 1, self.focpos.max() + 1)
plot (self.focpos, self.fwhm, "r+", x, fitfunc(self.fwhm_poly, x), "r-")
show()
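# Worked example of the focusing sweep performed by Focusing.takeImages above
# (the step and attempts values are just the assumed defaults): with step = 1
# and attempts = 20 the temporary offsets FOC_TOFF run -10, -9, ..., +9, so the
# exposures are centred on the focuser's current FOC_DEF.
def _offset_sweep_sketch(step=1, attempts=20):
	off = -1 * step * (attempts // 2)
	return [off + i * step for i in range(attempts)]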
| gpl-2.0 |
mjgrav2001/scikit-learn | benchmarks/bench_plot_fastkmeans.py | 294 | 4676 | from __future__ import print_function
from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster.k_means_ import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
chunk = 100
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
data = nr.random_integers(-50, 50, (n_samples, n_features))
print('K-Means')
tstart = time()
kmeans = KMeans(init='k-means++', n_clusters=10).fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.5f" % kmeans.inertia_)
print()
results['kmeans_speed'].append(delta)
results['kmeans_quality'].append(kmeans.inertia_)
print('Fast K-Means')
# let's prepare the data in small chunks
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=10,
batch_size=chunk)
tstart = time()
mbkmeans.fit(data)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %f" % mbkmeans.inertia_)
print()
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
def compute_bench_2(chunks):
results = defaultdict(lambda: [])
n_features = 50000
means = np.array([[1, 1], [-1, -1], [1, -1], [-1, 1],
[0.5, 0.5], [0.75, -0.5], [-1, 0.75], [1, 0]])
X = np.empty((0, 2))
for i in range(8):
X = np.r_[X, means[i] + 0.8 * np.random.randn(n_features, 2)]
max_it = len(chunks)
it = 0
for chunk in chunks:
it += 1
print('==============================')
print('Iteration %03d of %03d' % (it, max_it))
print('==============================')
print()
print('Fast K-Means')
tstart = time()
mbkmeans = MiniBatchKMeans(init='k-means++',
n_clusters=8,
batch_size=chunk)
mbkmeans.fit(X)
delta = time() - tstart
print("Speed: %0.3fs" % delta)
print("Inertia: %0.3fs" % mbkmeans.inertia_)
print()
results['MiniBatchKMeans Speed'].append(delta)
results['MiniBatchKMeans Quality'].append(mbkmeans.inertia_)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 150, 5).astype(np.int)
features_range = np.linspace(150, 50000, 5).astype(np.int)
chunks = np.linspace(500, 10000, 15).astype(np.int)
results = compute_bench(samples_range, features_range)
results_2 = compute_bench_2(chunks)
max_time = max([max(i) for i in [t for (label, t) in results.iteritems()
if "speed" in label]])
max_inertia = max([max(i) for i in [
t for (label, t) in results.iteritems()
if "speed" not in label]])
fig = plt.figure('scikit-learn K-Means benchmark results')
for c, (label, timings) in zip('brcy',
sorted(results.iteritems())):
if 'speed' in label:
ax = fig.add_subplot(2, 2, 1, projection='3d')
ax.set_zlim3d(0.0, max_time * 1.1)
else:
ax = fig.add_subplot(2, 2, 2, projection='3d')
ax.set_zlim3d(0.0, max_inertia * 1.1)
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
i = 0
for c, (label, timings) in zip('br',
sorted(results_2.iteritems())):
i += 1
ax = fig.add_subplot(2, 2, i + 2)
y = np.asarray(timings)
ax.plot(chunks, y, color=c, alpha=0.8)
ax.set_xlabel('Chunks')
ax.set_ylabel(label)
plt.show()
| bsd-3-clause |
vjlbym/sima | sima/segment/tests/test_segment.py | 6 | 3121 | import sys
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import (
assert_,
assert_equal,
assert_almost_equal,
assert_array_almost_equal,
assert_raises,
assert_array_equal,
dec,
TestCase,
run_module_suite,
assert_allclose)
import os
import shutil
import matplotlib.path as mplPath
from sima import ImagingDataset, Sequence, ROI
from sima.misc import example_data, example_tiff
from sima import segment
from sima.segment.ca1pc import cv2_available
def setup():
global tmp_dir
tmp_dir = os.path.join(os.path.dirname(__file__), 'tmp')
try:
os.mkdir(tmp_dir)
except:
pass
def teardown():
global tmp_dir
shutil.rmtree(tmp_dir)
def _gaussian_2d(xy, xy0, xysig):
X, Y = np.meshgrid(xy[0], xy[1])
return np.exp(-0.5*(((X-xy0[0]) / xysig[0])**2 +
((Y-xy0[1]) / xysig[1])**2))
def test_extract_rois():
return
@dec.knownfailureif(
sys.version_info > (3, 0) and
LooseVersion(np.__version__) < LooseVersion('1.9.0'))
def test_STICA():
ds = ImagingDataset.load(example_data())
method = segment.STICA(components=5)
method.append(segment.SparseROIsFromMasks(min_size=50))
method.append(segment.SmoothROIBoundaries(tolerance=1,min_verts=8))
method.append(segment.MergeOverlapping(0.5))
ds.segment(method)
@dec.skipif(not cv2_available)
def test_PlaneNormalizedCuts():
ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
affinty_method = segment.BasicAffinityMatrix(num_pcs=5)
method = segment.PlaneWiseSegmentation(
segment.PlaneNormalizedCuts(affinty_method))
ds.segment(method)
@dec.skipif(not cv2_available)
def test_PlaneCA1PC():
ds = ImagingDataset.load(example_data())[:, :, :, :50, :50]
method = segment.PlaneCA1PC(num_pcs=5)
ds.segment(method)
class TestPostprocess(object):
def setup(self):
global tmp_dir
self.filepath = os.path.join(tmp_dir, "test_imaging_dataset.sima")
self.tiff_ds = ImagingDataset(
[Sequence.create('TIFF', example_tiff(), 1, 1)],
self.filepath)
def teardown(self):
shutil.rmtree(self.filepath)
def test_postprocess(self):
centers = [(10, 10), (40, 40), (70, 70), (100, 100)]
roi_xy = [np.arange(self.tiff_ds.sequences[0].shape[2]),
np.arange(self.tiff_ds.sequences[0].shape[3])]
rois = ROI.ROIList([
ROI.ROI(_gaussian_2d(roi_xy, center, (10, 10)))
for center in centers])
tobool = segment.SparseROIsFromMasks(n_processes=2)
smooth = segment.SmoothROIBoundaries(n_processes=2)
rois = smooth.apply(tobool.apply(rois))
assert_(len(rois) == len(centers))
for roi in rois:
polygon = mplPath.Path(roi.coords[0][:, :2])
for nc, center in enumerate(centers):
if polygon.contains_point(center):
centers.pop(nc)
break
assert_(len(centers) == 0)
if __name__ == "__main__":
run_module_suite()
| gpl-2.0 |
siutanwong/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are a bad pick for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
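# Minimal sketch of the two calls this example is built around (a hypothetical
# helper that is never invoked below; the blob parameters are made up):
# ``silhouette_score`` gives the average coefficient for a labelling, while
# ``silhouette_samples`` gives one coefficient per sample, both within [-1, 1].
def _silhouette_minimal_sketch():
    X_demo, _ = make_blobs(n_samples=200, centers=3, random_state=0)
    labels = KMeans(n_clusters=3, random_state=0).fit_predict(X_demo)
    average = silhouette_score(X_demo, labels)       # single summary value
    per_sample = silhouette_samples(X_demo, labels)  # array of shape (200,)
    return average, per_sample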
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
alexsavio/scikit-learn | sklearn/feature_selection/tests/test_base.py | 98 | 3681 | import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
from sklearn.utils.testing import assert_raises, assert_equal
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |
heli522/scikit-learn | sklearn/feature_selection/rfe.py | 137 | 17066 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
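# Illustrative sketch of the elimination loop described in the RFE docstring,
# removing one feature per iteration. This is a simplified stand-alone helper
# (hypothetical name, not part of the public API) and it assumes an estimator
# exposing a 1-D ``coef_`` attribute after fitting.
def _rfe_loop_sketch(estimator, X, y, n_features_to_select):
    support = np.ones(X.shape[1], dtype=bool)
    while support.sum() > n_features_to_select:
        features = np.arange(X.shape[1])[support]
        est = clone(estimator).fit(X[:, features], y)
        # rank the remaining features by squared coefficient magnitude
        ranks = np.argsort(safe_sqr(est.coef_).ravel())
        # prune the feature with the smallest absolute weight
        support[features[ranks[0]]] = False
    return support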
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
``grid_scores_[i]`` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (which are not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
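# Worked example for the "Notes" section of RFECV above (assuming an integer
# ``step``): with n_features = 10 and step = 1 the recorded curve has
# ceil((10 - 1) / 1) + 1 == 10 entries, one CV score per candidate subset size.
def _rfecv_n_scores_sketch(n_features, step):
    return int(np.ceil((n_features - 1) / float(step))) + 1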
| bsd-3-clause |
pybrain/pybrain | pybrain/tools/neuralnets.py | 26 | 13763 | # Neural network data analysis tool collection. Makes heavy use of the logging module.
# Can generate training curves during the run (from properly setup IPython and/or with
# TkAgg backend and interactive mode - see matplotlib documentation).
__author__ = "Martin Felder"
__version__ = "$Id$"
from pylab import ion, figure, draw
import csv
from numpy import Infinity
import logging
from pybrain.datasets import ClassificationDataSet, SequentialDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer, RPropMinusTrainer, Trainer
from pybrain.structure import SoftmaxLayer, LSTMLayer
from pybrain.utilities import setAllArgs
from pybrain.tools.plotting import MultilinePlotter
from pybrain.tools.validation import testOnSequenceData, ModuleValidator, Validator
from pybrain.tools.customxml import NetworkWriter
class NNtools(object):
""" Abstract class providing basic functionality to make neural network training more comfortable """
def __init__(self, DS, **kwargs):
""" Initialize with the training data set DS. All keywords given are set as member variables.
The following are particularly important:
:key hidden: number of hidden units
:key TDS: test data set for checking convergence
:key VDS: validation data set for final performance evaluation
:key epoinc: number of epochs to train for, before checking convergence (default: 5)
"""
self.DS = DS
self.hidden = 10
self.maxepochs = 1000
self.Graph = None
self.TDS = None
self.VDS = None
self.epoinc = 5
setAllArgs(self, kwargs)
self.trainCurve = None
def initGraphics(self, ymax=10, xmax= -1):
""" initialize the interactive graphics output window, and return a handle to the plot """
if xmax < 0:
xmax = self.maxepochs
figure(figsize=[12, 8])
ion()
draw()
#self.Graph = MultilinePlotter(autoscale=1.2 ) #xlim=[0, self.maxepochs], ylim=[0, ymax])
self.Graph = MultilinePlotter(xlim=[0, xmax], ylim=[0, ymax])
self.Graph.setLineStyle([0, 1], linewidth=2)
return self.Graph
def set(self, **kwargs):
""" convenience method to set several member variables at once """
setAllArgs(self, kwargs)
def saveTrainingCurve(self, learnfname):
""" save the training curves into a file with the given name (CSV format) """
logging.info('Saving training curves into ' + learnfname)
if self.trainCurve is None:
            logging.error('No training curve available for saving!')
            return
learnf = open(learnfname, "wb")
writer = csv.writer(learnf, dialect='excel')
nDataSets = len(self.trainCurve)
for i in range(1, len(self.trainCurve[0]) - 1):
writer.writerow([self.trainCurve[k][i] for k in range(nDataSets)])
learnf.close()
def saveNetwork(self, fname):
""" save the trained network to a file """
NetworkWriter.writeToFile(self.Trainer.module, fname)
logging.info("Network saved to: " + fname)
#=======================================================================================================
class NNregression(NNtools):
""" Learns to numerically predict the targets of a set of data, with optional online progress plots. """
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Constructs a 3-layer FNN for regression. Optional arguments are passed on to the Trainer class. """
if hidden is not None:
self.hidden = hidden
logging.info("Constructing FNN with following config:")
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim)
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Training FNN with following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly.
CAVEAT: No support for Sequential datasets!"""
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='normalized regression error')
self.Graph.setLegend(['training', 'test'], loc='upper right')
epoch = 0
inc = self.epoinc
best_error = Infinity
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
err_trn = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.DS)
learncurve_y.append(err_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %10g" % (epoch, err_trn))
else:
# calculate same errors on TEST data
err_tst = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.TDS)
valcurve_y.append(err_tst)
if err_tst < best_error:
# store best error and parameters
best_epoch = epoch
best_error = err_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %10g, err_tst: %10g, best_tst: %10g" % (epoch, err_trn, err_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, err_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoch increments)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, err_trn)
self.Graph.update()
# training finished!
logging.info("Best epoch: %6d, with error: %10g" % (best_epoch, best_error))
if self.VDS is not None:
# calculate same errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
err_val = ModuleValidator.validate(Validator.MSE, self.Trainer.module, self.VDS)
logging.info("Result on evaluation data: %10g" % err_val)
# store training curve for saving into file
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
#=======================================================================================================
class NNclassifier(NNtools):
""" Learns to classify a set of data, with optional online progress plots. """
def __init__(self, DS, **kwargs):
""" Initialize the classifier: the least we need is the dataset to be classified. All keywords given are set as member variables. """
if not isinstance(DS, ClassificationDataSet):
raise TypeError('Need a ClassificationDataSet to do classification!')
NNtools.__init__(self, DS, **kwargs)
self.nClasses = self.DS.nClasses # need this because targets may be altered later
self.clsnames = None
self.targetsAreOneOfMany = False
def _convertAllDataToOneOfMany(self, values=[0, 1]):
""" converts all datasets associated with self into 1-out-of-many representations,
e.g. with original classes 0 to 4, the new target for class 1 would be [0,1,0,0,0],
or accordingly with other upper and lower bounds, as given by the values keyword """
if self.targetsAreOneOfMany:
return
else:
# convert all datasets to one-of-many ("winner takes all") representation
for dsname in ["DS", "TDS", "VDS"]:
d = getattr(self, dsname)
if d is not None:
if d.outdim < d.nClasses:
d._convertToOneOfMany(values)
self.targetsAreOneOfMany = True
def setupNN(self, trainer=RPropMinusTrainer, hidden=None, **trnargs):
""" Setup FNN and trainer for classification. """
self._convertAllDataToOneOfMany()
if hidden is not None:
self.hidden = hidden
FNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, outclass=SoftmaxLayer)
logging.info("Constructing classification FNN with following config:")
logging.info(str(FNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(FNN, dataset=self.DS, **trnargs)
def setupRNN(self, trainer=BackpropTrainer, hidden=None, **trnargs):
""" Setup an LSTM RNN and trainer for sequence classification. """
if hidden is not None:
self.hidden = hidden
self._convertAllDataToOneOfMany()
RNN = buildNetwork(self.DS.indim, self.hidden, self.DS.outdim, hiddenclass=LSTMLayer,
recurrent=True, outclass=SoftmaxLayer)
logging.info("Constructing classification RNN with following config:")
logging.info(str(RNN) + "\n Hidden units:\n " + str(self.hidden))
logging.info("Trainer received the following special arguments:")
logging.info(str(trnargs))
self.Trainer = trainer(RNN, dataset=self.DS, **trnargs)
def runTraining(self, convergence=0, **kwargs):
""" Trains the network on the stored dataset. If convergence is >0, check after that many epoch increments
whether test error is going down again, and stop training accordingly. """
assert isinstance(self.Trainer, Trainer)
if self.Graph is not None:
self.Graph.setLabels(x='epoch', y='% classification error')
self.Graph.setLegend(['training', 'test'], loc='lower right')
epoch = 0
inc = self.epoinc
best_error = 100.0
best_epoch = 0
learncurve_x = [0]
learncurve_y = [0.0]
valcurve_y = [0.0]
converged = False
convtest = 0
if convergence > 0:
logging.info("Convergence criterion: %d batches of %d epochs w/o improvement" % (convergence, inc))
while epoch <= self.maxepochs and not converged:
self.Trainer.trainEpochs(inc)
epoch += inc
learncurve_x.append(epoch)
# calculate errors on TRAINING data
if isinstance(self.DS, SequentialDataSet):
r_trn = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.DS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True)
r_trn = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
learncurve_y.append(r_trn)
if self.TDS is None:
logging.info("epoch: %6d, err_trn: %5.2f%%" % (epoch, r_trn))
else:
# calculate errors on TEST data
if isinstance(self.DS, SequentialDataSet):
r_tst = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.TDS))
else:
# FIXME: messy - validation does not belong into the Trainer...
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.TDS)
r_tst = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
valcurve_y.append(r_tst)
if r_tst < best_error:
best_epoch = epoch
best_error = r_tst
bestweights = self.Trainer.module.params.copy()
convtest = 0
else:
convtest += 1
logging.info("epoch: %6d, err_trn: %5.2f%%, err_tst: %5.2f%%, best_tst: %5.2f%%" % (epoch, r_trn, r_tst, best_error))
if self.Graph is not None:
self.Graph.addData(1, epoch, r_tst)
                # check if convergence criterion is fulfilled (no improvement after N epoch increments)
if convtest >= convergence:
converged = True
if self.Graph is not None:
self.Graph.addData(0, epoch, r_trn)
self.Graph.update()
logging.info("Best epoch: %6d, with error: %5.2f%%" % (best_epoch, best_error))
if self.VDS is not None:
# calculate errors on VALIDATION data
self.Trainer.module.params[:] = bestweights.copy()
if isinstance(self.DS, SequentialDataSet):
r_val = 100. * (1.0 - testOnSequenceData(self.Trainer.module, self.VDS))
else:
out, trueclass = self.Trainer.testOnClassData(return_targets=True, dataset=self.VDS)
r_val = 100. * (1.0 - Validator.classificationPerformance(out, trueclass))
logging.info("Result on evaluation data: %5.2f%%" % r_val)
self.trainCurve = (learncurve_x, learncurve_y, valcurve_y)
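#=======================================================================================================
# Minimal usage sketch (never executed on import): how one might drive the
# NNregression helper above. The dataset, layer size and epoch counts are
# assumptions chosen for illustration, not recommended settings.
def _nnregression_usage_sketch():
    from pybrain.datasets import SupervisedDataSet
    ds = SupervisedDataSet(2, 1)                     # 2 inputs, 1 target
    for x1, x2 in [(0., 0.), (0., 1.), (1., 0.), (1., 1.)]:
        ds.addSample((x1, x2), (x1 + x2,))           # toy regression target
    nn = NNregression(ds, hidden=5, maxepochs=20, epoinc=5)
    nn.setupNN()                                     # builds the FNN and RProp- trainer
    nn.runTraining()                                 # logs training error every epoinc epochs
    return nn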
| bsd-3-clause |
taknevski/tensorflow-xsmm | tensorflow/python/estimator/inputs/pandas_io_test.py | 89 | 8340 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(TypeError,
'shuffle must be explicitly set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
utke1/numpy | numpy/core/tests/test_multiarray.py | 3 | 247597 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
import gc
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_warns,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
            except Exception as e:
                # re-raise as RuntimeError so the test can assert on it below
                raise RuntimeError(e)
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
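    def _sketch_broadcast_shapes(self):
        # Minimal sketch (method name is ours, not NumPy API) of the
        # ordinary rule that the comment in test_assignment_broadcasting
        # contrasts with: missing axes are added on the left of the *input*,
        # so a (3,) operand broadcasts against a (2, 3) target.
        assert_equal(np.broadcast(np.empty((2, 3)), np.empty(3)).shape, (2, 3))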
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_too_big_error(self):
        # 46341 is the smallest integer greater than sqrt(2**31 - 1).
# 3037000500 is the smallest integer greater than sqrt(2**63 - 1).
# We want to make sure that the square byte array with those dimensions
# is too big on 32 or 64 bit systems respectively.
if np.iinfo('intp').max == 2**31 - 1:
shape = (46341, 46341)
elif np.iinfo('intp').max == 2**63 - 1:
shape = (3037000500, 3037000500)
else:
return
assert_raises(ValueError, np.empty, shape, dtype=np.int8)
assert_raises(ValueError, np.zeros, shape, dtype=np.int8)
assert_raises(ValueError, np.ones, shape, dtype=np.int8)
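    def _sketch_intp_overflow_threshold(self):
        # Worked arithmetic behind the shapes chosen in test_too_big_error
        # above (sketch helper, not part of the original assertions):
        # 46340**2 still fits in a signed 32-bit index while 46341**2 does
        # not, and likewise for the 64-bit pair.
        assert 46340 ** 2 <= 2 ** 31 - 1 < 46341 ** 2
        assert 3037000499 ** 2 <= 2 ** 63 - 1 < 3037000500 ** 2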
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big array as they might be allocated different by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
# This test can fail on 32-bit systems due to insufficient
# contiguous memory. Deallocating the previous array increases the
# chance of success.
del(d)
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a ValueError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
def test_false_len_sequence(self):
# gh-7264, segfault for this example
class C:
def __getitem__(self, i):
raise IndexError
def __len__(self):
return 42
assert_raises(ValueError, np.array, C()) # segfault?
def test_failed_len_sequence(self):
# gh-7393
class A(object):
def __init__(self, data):
self._data = data
def __getitem__(self, item):
return type(self)(self._data[item])
def __len__(self):
return len(self._data)
# len(d) should give 3, but len(d[0]) will fail
d = A([1,2,3])
assert_equal(len(np.array(d)), 3)
def test_array_too_big(self):
# Test that array creation succeeds for arrays addressable by intp
# on the byte level and fails for too large arrays.
buf = np.zeros(100)
max_bytes = np.iinfo(np.intp).max
for dtype in ["intp", "S20", "b"]:
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
np.ndarray(buffer=buf, strides=(0,),
shape=(max_bytes//itemsize,), dtype=dtype)
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
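def _sketch_zero_stride_trick():
    # Minimal sketch (helper name is ours, not NumPy API) of the trick used
    # in TestCreation.test_array_too_big above: with a stride of 0 every
    # element aliases the same buffer entry, so a huge logical length costs
    # no extra memory.
    buf = np.zeros(1)
    a = np.ndarray(buffer=buf, strides=(0,), shape=(10**6,), dtype=buf.dtype)
    assert a.strides == (0,)
    assert a[0] == a[-1] == 0.0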
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result to broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with suppress_warnings() as sup:
sup.filter(FutureWarning, "elementwise == comparison failed")
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
# New in 1.12: This behavior changes in 1.13, test for dep warning
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
with assert_warns(FutureWarning):
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
def test_zero_width_string(self):
# Test for PR #6430 / issues #473, #4955, #2585
dt = np.dtype([('I', int), ('S', 'S0')])
x = np.zeros(4, dtype=dt)
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['S'].itemsize, 0)
x['S'] = ['a', 'b', 'c', 'd']
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #4955
x['S'][x['I'] == 0] = 'hello'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Variation on test case from #2585
x['S'] = 'A'
assert_equal(x['S'], [b'', b'', b'', b''])
assert_equal(x['I'], [0, 0, 0, 0])
# Allow zero-width dtypes in ndarray constructor
y = np.ndarray(4, dtype=x['S'].dtype)
assert_equal(y.itemsize, 0)
assert_equal(x['S'], y)
# More tests for indexing an array with zero-width fields
assert_equal(np.zeros(4, dtype=[('a', 'S0,S0'),
('b', 'u1')])['a'].itemsize, 0)
assert_equal(np.empty(3, dtype='S0,S0').itemsize, 0)
assert_equal(np.zeros(4, dtype='S0,u1')['f0'].itemsize, 0)
xx = x['S'].reshape((2, 2))
assert_equal(xx.itemsize, 0)
assert_equal(xx, [[b'', b''], [b'', b'']])
b = io.BytesIO()
np.save(b, xx)
b.seek(0)
yy = np.load(b)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
with temppath(suffix='.npy') as tmp:
np.save(tmp, xx)
yy = np.load(tmp)
assert_equal(yy.itemsize, 0)
assert_equal(xx, yy)
def test_base_attr(self):
a = np.zeros(3, dtype='i4,f4')
b = a[0]
assert_(b.base is a)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
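def _sketch_bool_count_nonzero():
    # Illustrative sketch (helper name is ours, not NumPy API) of the
    # property TestBool checks above: count_nonzero of a boolean array is
    # the number of True entries, i.e. the Python-level sum of its list
    # form.
    a = np.array([True, False, True, True])
    assert np.count_nonzero(a) == sum(a.tolist()) == 3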
class TestMethods(TestCase):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = arr.compress([0, 1, 0, 1, 0], axis=1)
assert_equal(out, tgt)
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1], axis=1)
assert_equal(out, tgt)
arr = np.arange(10).reshape(2, 5)
out = arr.compress([0, 1])
assert_equal(out, 1)
def test_choose(self):
x = 2*np.ones((3,), dtype=int)
y = 3*np.ones((3,), dtype=int)
x2 = 2*np.ones((2, 3), dtype=int)
y2 = 3*np.ones((2, 3), dtype=int)
ind = np.array([0, 0, 1])
A = ind.choose((x, y))
assert_equal(A, [2, 2, 3])
A = ind.choose((x2, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
for ctype in [np.int16, np.uint16, np.int32, np.uint32,
np.float32, np.float64, np.complex64, np.complex128]:
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
self.assertRaises(ArithmeticError, a.prod)
self.assertRaises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
np.array([50, 36, 84, 180], ctype))
assert_array_equal(a2.prod(axis=-1),
np.array([24, 1890, 600], ctype))
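    def _sketch_prod_arithmetic(self):
        # Worked arithmetic (sketch only, not part of the original suite)
        # for the expected values in test_prod above, assuming the same
        # ba/ba2 inputs.
        assert 1 * 2 * 10 * 11 * 6 * 5 * 4 == 26400
        assert [1 * 5 * 10, 2 * 6 * 3, 3 * 7 * 4, 4 * 9 * 5] == [50, 36, 84, 180]
        assert [1 * 2 * 3 * 4, 5 * 6 * 7 * 9, 10 * 3 * 4 * 5] == [24, 1890, 600]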
def test_repeat(self):
m = np.array([1, 2, 3, 4, 5, 6])
m_rect = m.reshape((2, 3))
A = m.repeat([1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
A = m.repeat(2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
A = m_rect.repeat([2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = m_rect.repeat([1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
A = m_rect.repeat(2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = m_rect.repeat(2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
def test_reshape(self):
arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(arr.reshape(2, 6), tgt)
tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
assert_equal(arr.reshape(3, 4), tgt)
tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]]
assert_equal(arr.reshape((3, 4), order='F'), tgt)
tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
assert_equal(arr.T.reshape((3, 4), order='C'), tgt)
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_squeeze(self):
a = np.array([[[1], [2], [3]]])
assert_equal(a.squeeze(), [1, 2, 3])
assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]])
assert_raises(ValueError, a.squeeze, axis=(1,))
assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]])
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
# test generic class with bogus ordering,
# should not segfault.
class Boom(object):
def __lt__(self, other):
return True
a = np.array([Boom()]*100, dtype=object)
for kind in ['q', 'm', 'h']:
msg = "bogus comparison object sort, kind=%s" % kind
            a.sort(kind=kind)
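    def _sketch_nan_sort_order(self):
        # Minimal sketch (method name is ours, not NumPy API) of the
        # ordering test_sort relies on above: np.sort places nan entries at
        # the end of the result.
        s = np.sort(np.array([np.nan, 1.0, 0.0]))
        assert_equal(s[:2], [0.0, 1.0])
        assert_(np.isnan(s[2]))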
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
do = d.copy()
x = d
# create a median of 3 killer where each median is the sorted second
# last element of the quicksort partition
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
assert_equal(np.sort(d), do)
assert_equal(d[np.argsort(d)], do)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
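    def _sketch_stable_argsort(self):
        # Minimal sketch (method name is ours, not NumPy API) of the
        # stability property checked at the end of test_argsort above:
        # kind='m' (mergesort) keeps equal keys in their original order.
        a = np.array([1, 0, 1, 0])
        assert_equal(a.argsort(kind='m'), [1, 3, 0, 2])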
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
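    def _sketch_searchsorted_sides(self):
        # Minimal sketch (method name is ours, not NumPy API) of the side
        # semantics used throughout test_searchsorted above: 'left' gives
        # the first slot where the key keeps the array sorted, 'right'
        # gives the slot after any equal entries.
        a = np.array([1, 2, 2, 3])
        assert_equal(a.searchsorted(2, side='left'), 1)
        assert_equal(a.searchsorted(2, side='right'), 3)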
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
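    def _sketch_sorter_semantics(self):
        # Illustrative sketch (method name is ours, not NumPy API) of the
        # sorter keyword exercised above: searching with sorter=s behaves
        # as if the search ran on a[s], which is sorted even though a
        # itself is not.
        a = np.array([30, 10, 20])
        s = a.argsort()
        assert_equal(a.searchsorted(20, sorter=s), 1)
        assert_equal(np.sort(a).searchsorted(20), 1)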
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
d = np.arange(10)
assert_raises(TypeError, d.argpartition, 9.)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.argpartition, 9.)
def test_partition_integer(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(TypeError, d.partition, 9.)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
        msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones(1)
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones(50)
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange(49)
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange(47)[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange(47) % 7
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# assert_array_less does not seem to work right here, so check manually
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt))
# assert_array_less does not seem to work right here, so check manually
at((p[:i, :] <= p[i, :]).all(),
msg="%d: %r <= %r" % (i, p[i, :], p[:i, :]))
at((p[i + 1:, :] > p[i, :]).all(),
msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None, :]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
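# Illustrative sketch (not part of the original suite): the basic invariant that
# np.partition provides is that everything before index kth is <= the kth element
# and everything after it is >= that element; argpartition yields indices that
# reproduce the same order. Only the public numpy API is used and the *_demo
# names are hypothetical.
import numpy as np
arr_demo = np.arange(20)
np.random.shuffle(arr_demo)
kth_demo = 7
part_demo = np.partition(arr_demo, kth_demo)
assert (part_demo[:kth_demo] <= part_demo[kth_demo]).all()
assert (part_demo[kth_demo + 1:] >= part_demo[kth_demo]).all()
assert (arr_demo[np.argpartition(arr_demo, kth_demo)] == part_demo).all()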
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
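# Illustrative sketch (hypothetical names, not in the original file): passing a
# sequence of kth values partitions around all of them at once, so every listed
# position ends up holding the value it would have in a fully sorted array.
import numpy as np
seq_demo = np.array([9, 1, 8, 2, 7, 3, 6, 4, 5, 0])
kth_seq = [2, 5, 8]
part_seq = np.partition(seq_demo, kth_seq)
assert (part_seq[kth_seq] == np.sort(seq_demo)[kth_seq]).all()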
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
d = np.arange(24).reshape(4, 6)
ddt = np.array(
[[ 55, 145, 235, 325],
[ 145, 451, 757, 1063],
[ 235, 757, 1279, 1801],
[ 325, 1063, 1801, 2539]]
)
dtd = np.array(
[[504, 540, 576, 612, 648, 684],
[540, 580, 620, 660, 700, 740],
[576, 620, 664, 708, 752, 796],
[612, 660, 708, 756, 804, 852],
[648, 700, 752, 804, 856, 908],
[684, 740, 796, 852, 908, 964]]
)
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
assert_equal(np.dot(eaf, eaf), eaf)
assert_equal(np.dot(eaf.T, eaf), eaf)
assert_equal(np.dot(eaf, eaf.T), eaf)
assert_equal(np.dot(eaf.T, eaf.T), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
assert_equal(np.dot(ebf, ebf), eaf)
assert_equal(np.dot(ebf.T, ebf), eaf)
assert_equal(np.dot(ebf, ebf.T), eaf)
assert_equal(np.dot(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
np.dot(edf[::-1, :], edf.T),
np.dot(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf[:, ::-1], edf.T),
np.dot(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
np.dot(edf, edf[::-1, :].T),
np.dot(edf, edf[::-1, :].T.copy())
)
assert_equal(
np.dot(edf, edf[:, ::-1].T),
np.dot(edf, edf[:, ::-1].T.copy())
)
assert_equal(
np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
assert_equal(np.dot(edf, edf.T), eddtf)
assert_equal(np.dot(edf.T, edf), edtdf)
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
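# Illustrative sketch (hypothetical names, not in the original file): when one
# operand is the transpose of the other, np.dot can take the symmetric rank-k
# (syrk) BLAS path; the result should be symmetric and should match the general
# gemm path, which the explicit copies below should fall back to.
import numpy as np
m_demo = np.arange(12, dtype=np.float64).reshape(3, 4)
syrk_demo = np.dot(m_demo, m_demo.T)                 # candidate syrk path
gemm_demo = np.dot(m_demo.copy(), m_demo.T.copy())   # separate buffers
assert np.allclose(syrk_demo, syrk_demo.T)
assert np.allclose(syrk_demo, gemm_demo)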
def test_dot_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_dot_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.dot, c, A)
assert_raises(TypeError, np.dot, A, c)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
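# Illustrative sketch (hypothetical names): for an n-d array, axis1/axis2 pick
# the plane the diagonal is taken from and the diagonal axis is appended last,
# so with axis1=0, axis2=1 column i of the result is cube_demo[i, i, :].
import numpy as np
cube_demo = np.arange(8).reshape(2, 2, 2)
diag_demo = cube_demo.diagonal(axis1=0, axis2=1)
assert diag_demo.shape == (2, 2)
assert (diag_demo[:, 0] == cube_demo[0, 0, :]).all()
assert (diag_demo[:, 1] == cube_demo[1, 1, :]).all()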
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
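# Illustrative sketch (hypothetical names): trace is simply the sum of the
# corresponding diagonal, so the two methods agree for any offset and axes.
import numpy as np
mat_demo = np.arange(12).reshape(3, 4)
assert mat_demo.trace(1) == mat_demo.diagonal(1).sum()          # 1 + 6 + 11 == 18
cube_tr = np.arange(8).reshape(2, 2, 2)
assert (cube_tr.trace(0, 0, 2) == cube_tr.diagonal(0, 0, 2).sum(axis=-1)).all()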
def test_trace_subclass(self):
# The subclass would need to override trace to ensure that a single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
# when calling np.put, make sure a
# TypeError is raised if the object
# isn't an ndarray
bad_array = [1, 2, 3]
assert_raises(TypeError, np.put, bad_array, [0, 2], 5)
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous, and a 1-sized axis with a non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous, and a 1-sized axis with a non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
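# Illustrative sketch (hypothetical names): 'C' and 'F' always follow row- and
# column-major index order, 'A' follows 'F' only when the array is Fortran
# contiguous, and 'K' follows the order of the elements in memory.
import numpy as np
fortran_demo = np.array([[0, 1], [2, 3]], order='F')
assert list(fortran_demo.ravel('C')) == [0, 1, 2, 3]
assert list(fortran_demo.ravel('F')) == [0, 2, 1, 3]
assert list(fortran_demo.ravel('A')) == [0, 2, 1, 3]  # Fortran contiguous, so like 'F'
assert list(fortran_demo.ravel('K')) == [0, 2, 1, 3]  # memory order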
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
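# Illustrative sketch (hypothetical names): swapaxes returns a view on the same
# buffer (gh-5260), so no data is copied and the result never owns its data.
import numpy as np
base_demo = np.arange(6).reshape(2, 3)
swapped_demo = base_demo.swapaxes(0, 1)
assert swapped_demo.shape == (3, 2)
assert not swapped_demo.flags['OWNDATA']
assert np.may_share_memory(base_demo, swapped_demo)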
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array(7, dtype=dt)
b = np.array([7], dtype=dt)
c = np.array([[[[[7]]]]], dtype=dt)
msg = 'dtype: {0}'.format(dt)
ap = complex(a)
assert_equal(ap, a, msg)
bp = complex(b)
assert_equal(bp, b, msg)
cp = complex(c)
assert_equal(cp, c, msg)
def test__complex__should_not_work(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8',
'f', 'd', 'g', 'F', 'D', 'G',
'?', 'O']
for dt in dtypes:
a = np.array([1, 2, 3], dtype=dt)
assert_raises(TypeError, complex, a)
dt = np.dtype([('a', 'f8'), ('b', 'i1')])
b = np.array((1.0, 3), dtype=dt)
assert_raises(TypeError, complex, b)
c = np.array([(1.0, 3), (2e-3, 7)], dtype=dt)
assert_raises(TypeError, complex, c)
d = np.array('1+1j')
assert_raises(TypeError, complex, d)
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
#     d = a.copy()  # refcount 1
#     return d, d + d  # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the returned original should not have been modified by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning whether the refcount == 1 object is on the Python stack (to check
# that we are called directly from Python) is flawed, as the object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(l):
#     return l[4] + l[4]  # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1s makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the returned original should not have been modified by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and ndarray
# subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
if i < len(inputs):
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
if ('out' in kw) and (kw['out'] is not None):
kw['out'] = np.asarray(kw['out'])
r = func(*inputs, **kw)
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# Obj3 is subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# Check that the index is set appropriately, also if only an output
# is passed on (the latter is another regression test for github bug 4753)
class CheckIndex(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return i
a = CheckIndex()
dummy = np.arange(2.)
# 1 input, 1 output
assert_equal(np.sin(a), 0)
assert_equal(np.sin(dummy, a), 1)
assert_equal(np.sin(dummy, out=a), 1)
assert_equal(np.sin(dummy, out=(a,)), 1)
assert_equal(np.sin(a, a), 0)
assert_equal(np.sin(a, out=a), 0)
assert_equal(np.sin(a, out=(a,)), 0)
# 1 input, 2 outputs
assert_equal(np.modf(dummy, a), 1)
assert_equal(np.modf(dummy, None, a), 2)
assert_equal(np.modf(dummy, dummy, a), 2)
assert_equal(np.modf(dummy, out=a), 1)
assert_equal(np.modf(dummy, out=(a,)), 1)
assert_equal(np.modf(dummy, out=(a, None)), 1)
assert_equal(np.modf(dummy, out=(a, dummy)), 1)
assert_equal(np.modf(dummy, out=(None, a)), 2)
assert_equal(np.modf(dummy, out=(dummy, a)), 2)
assert_equal(np.modf(a, out=(dummy, a)), 0)
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
assert_equal(np.add(dummy, a), 1)
assert_equal(np.add(dummy, dummy, a), 2)
assert_equal(np.add(dummy, a, a), 1)
assert_equal(np.add(dummy, dummy, out=a), 2)
assert_equal(np.add(dummy, dummy, out=(a,)), 2)
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if 'out' in kw:
tmp_kw = kw.copy()
tmp_kw.pop('out')
func = getattr(ufunc, method)
kw['out'][...] = func(*inputs, **tmp_kw)
A = np.array([0]).view(OutClass)
B = np.array([5])
C = np.array([6])
np.multiply(C, B, A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
A[0] = 0
np.multiply(C, B, out=A)
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a': 1}, {'b': 2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
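# Illustrative sketch (hypothetical names): for floating point input containing
# NaN, argmax reports the position of the first NaN (NaN also propagates
# through np.max), which is what the nan_arr table above encodes.
import numpy as np
nan_demo = np.array([0.0, 1.0, np.nan, 3.0, np.nan])
assert np.argmax(nan_demo) == 2
assert np.isnan(np.max(nan_demo))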
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (previously even the cases above were allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmax(), 0)
a[3] = 10
assert_equal(a.argmax(), 3)
a[1] = 30
assert_equal(a.argmax(), 1)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could possibly be relaxed (previously even the cases above were allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2, 3))
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(a.argmin(), 0)
a[3] = 30
assert_equal(a.argmin(), 3)
a[1] = 10
assert_equal(a.argmin(), 1)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
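# Illustrative sketch (hypothetical names): np.newaxis inserts a length-1 axis,
# turning the (3,) vector into a (3, 1) column, so the scalar multiplication
# above broadcasts to shape (3, 1).
import numpy as np
vec_demo = np.array([0.0, -0.1, 0.1])
col_demo = vec_demo[:, np.newaxis]
assert col_demo.shape == (3, 1)
assert (250 * col_demo).shape == (3, 1)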
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
def test_nan(self):
input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan])
result = input_arr.clip(-1, 1)
expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan])
assert_array_equal(result, expected)
class TestCompress(TestCase):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
tgt = [[1, 3], [6, 8]]
out = np.compress([0, 1, 0, 1, 0], arr, axis=1)
assert_equal(out, tgt)
def test_truncate(self):
tgt = [[1], [6]]
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr, axis=1)
assert_equal(out, tgt)
def test_flatten(self):
arr = np.arange(10).reshape(2, 5)
out = np.compress([0, 1], arr)
assert_equal(out, 1)
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
assert_equal(x.dtype, T)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [bytes, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
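# Illustrative sketch (hypothetical names): mode='clip' clamps out-of-range
# indices to the valid range, mode='wrap' takes them modulo the axis length,
# and the default mode='raise' raises IndexError.
import numpy as np
take_demo = np.array([10, 20, 30])
assert take_demo.take([5], mode='clip')[0] == 30   # clamped to index 2
assert take_demo.take([4], mode='wrap')[0] == 20   # 4 % 3 == 1
assert take_demo.take([-1], mode='wrap')[0] == 30  # -1 wraps to index 2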
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
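# Illustrative sketch (hypothetical names): np.lexsort sorts by the LAST key
# first, so passing (secondary, primary) orders by the primary key and breaks
# ties with the secondary key, which is why expected_idx above is [0, 4, 2, ...].
import numpy as np
primary_demo = np.array([1, 2, 1, 3, 1, 5])
secondary_demo = np.array([0, 4, 5, 6, 2, 3])
order_demo = np.lexsort((secondary_demo, primary_demo))
assert list(order_demo) == [0, 4, 2, 1, 3, 5]
assert (primary_demo[order_demo] == np.sort(primary_demo)).all()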
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
def test_object(self): # gh-6312
a = np.random.choice(10, 1000)
b = np.random.choice(['abc', 'xy', 'wz', 'efghi', 'qwst', 'x'], 1000)
for u in a, b:
left = np.lexsort((u.astype('O'),))
right = np.argsort(u, kind='mergesort')
assert_array_equal(left, right)
for u, v in (a, b), (b, a):
idx = np.lexsort((u, v))
assert_array_equal(idx, np.lexsort((u.astype('O'), v)))
assert_array_equal(idx, np.lexsort((u, v.astype('O'))))
u, v = np.array(u, dtype='object'), np.array(v, dtype='object')
assert_array_equal(idx, np.lexsort((u, v)))
def test_invalid_axis(self): # gh-7528
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(ValueError, np.lexsort, x, axis=2)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
# this should probably be supported as a file object,
# but for now test that the proper errors are raised
b = io.BytesIO()
assert_raises(IOError, np.fromfile, b, np.uint8, 80)
d = np.ones(7)
assert_raises(IOError, lambda x: x.tofile(b), d)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str() yields less precision than repr()
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_unbuffered_fromfile(self):
# gh-6246
self.x.tofile(self.filename)
def fail(*args, **kwargs):
raise io.IOError('Can not tell or seek')
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_largish_file(self):
# check the fallocate path on files > 16MB
d = np.zeros(4 * 1024 ** 2)
d.tofile(self.filename)
assert_equal(os.path.getsize(self.filename), d.nbytes)
assert_array_equal(d, np.fromfile(self.filename))
# check offset
with open(self.filename, "r+b") as f:
f.seek(d.nbytes)
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
# check append mode (gh-8329)
open(self.filename, "w").close() # delete file contents
with open(self.filename, "ab") as f:
d.tofile(f)
assert_array_equal(d, np.fromfile(self.filename))
with open(self.filename, "ab") as f:
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1)  # ANSI C requires a seek between a read and a write
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
#assert_equal(s, '1.51,2.0,3.51,4.0')
y = np.array([float(p) for p in s.split(',')])
assert_array_equal(x, y)
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert_(testpassed)
assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
assert_(e.flags.writeable is True)
assert_(f.flags.writeable is True)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
assert_(f.flags.updateifcopy is True)
assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
x.resize((5, 5), refcheck=False)
else:
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, refcheck=False)
else:
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
x.resize(3, 2, 1, refcheck=False)
else:
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
x.resize(2, 3, 3, refcheck=False)
else:
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
if IS_PYPY:
a.resize(15, refcheck=False)
else:
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
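# Editorial sketch of the in-place resize behaviour exercised by TestResize:
# growing an array zero-fills the appended elements, and refcheck=False skips
# the reference-count check (always needed on PyPy, as the IS_PYPY branches
# show). This helper is illustrative only.
def _example_resize_zero_fill():
    import numpy as np
    x = np.eye(3)
    x.resize((2, 3, 3), refcheck=False)
    assert (x[0] == np.eye(3)).all()   # original data preserved
    assert (x[1] == 0).all()           # appended block is zero-initialized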
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
def test_assign():
dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
assert_raises(ValueError, test_assign)
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
def test_multiple_field_name_unicode(self):
def test_assign_unicode():
dt = np.dtype([("\u20B9", "f8"),
("B", "f8"),
("\u20B9", "f8")])
# Error raised when multiple fields have the same name (unicode included)
assert_raises(ValueError, test_assign_unicode)
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
with suppress_warnings() as sup:
sup.filter(FutureWarning,
"Assignment between structured arrays.*")
sup.filter(FutureWarning,
"Numpy has detected that you .*")
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
(2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
(3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
(2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
# For <=1.12 a is not modified, but it will be in 1.13
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
# make sure views of a multi-field index warn too
c = np.zeros(3, dtype='i8,i8,i8')
assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
[FutureWarning])
# make sure assignment using a different dtype warns
a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition as there are lots of places along the line
# where type casting may take place.
# for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
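# Editorial sketch of the scaling relation test_ddof relies on (standard NumPy
# semantics, not part of the original suite): dividing by (n - ddof) instead of
# n means var(x, ddof=d) * (n - d) equals var(x) * n.
def _example_ddof_scaling():
    import numpy as np
    x = np.random.rand(10)
    n = x.size
    for d in range(3):
        assert np.allclose(np.var(x, ddof=d) * (n - d), np.var(x) * n)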
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_mean_float16(self):
# This fails if the sum inside mean is done in float16 instead
# of float32.
assert _mean(np.ones(100000, dtype='float16')) == 1
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
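# Editorial sketch of what separates vdot from dot (standard NumPy semantics):
# vdot conjugates its first argument and flattens both inputs to 1-D, so it is
# not symmetric for complex data.
def _example_vdot_conjugates():
    import numpy as np
    a = np.array([1 + 2j, 3 + 4j])
    b = np.array([5 + 6j, 7 + 8j])
    assert np.vdot(a, b) == np.dot(a.conj(), b)
    assert np.vdot(a, b) == np.conj(np.vdot(b, a))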
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
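# Editorial sketch of the out= contract test_dot_3args exercises: the output
# must already have the result's shape, a matching dtype and C-contiguous
# memory; dot() then writes into it and returns the same object.
def _example_dot_out():
    import numpy as np
    a = np.random.random_sample((4, 3))
    b = np.random.random_sample((3, 2))
    out = np.empty((4, 2))
    res = np.dot(a, b, out=out)
    assert res is out
    assert np.allclose(out, np.dot(a, b))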
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# 2016-01-29: NUMPY_UFUNC_DISABLED
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
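# Editorial sketch of the shape rules MatmulCommon relies on (standard matmul
# semantics): 1-D operands are promoted to matrices and the temporary axis is
# removed again, while leading "stack" dimensions broadcast.
def _example_matmul_shapes():
    import numpy as np
    v = np.ones(3)
    m = np.ones((3, 4))
    assert np.matmul(v, m).shape == (4,)      # vector @ matrix -> vector
    assert np.matmul(m.T, v).shape == (4,)    # matrix @ vector -> vector
    assert np.matmul(v, v).shape == ()        # vector @ vector -> scalar
    assert np.matmul(np.ones((5, 2, 3)), np.ones((5, 3, 4))).shape == (5, 2, 4)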
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
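# Editorial sketch of the recommended spelling mentioned in the comment above:
# rebind the name instead of using the augmented operator.
def _example_matmul_rebind():
    import numpy as np
    a = np.eye(3)
    b = 2 * np.eye(3)
    a = np.matmul(a, b)   # instead of "a @= b", which raises TypeError
    assert np.allclose(a, 2 * np.eye(3))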
class TestInner(TestCase):
def test_inner_type_mismatch(self):
c = 1.
A = np.array((1,1), dtype='i,i')
assert_raises(TypeError, np.inner, c, A)
assert_raises(TypeError, np.inner, A, c)
def test_inner_scalar_and_vector(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
vec = np.array([1, 2], dtype=dt)
desired = np.array([3, 6], dtype=dt)
assert_equal(np.inner(vec, sca), desired)
assert_equal(np.inner(sca, vec), desired)
def test_inner_scalar_and_matrix(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
sca = np.array(3, dtype=dt)[()]
arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
assert_equal(np.inner(arr, sca), desired)
assert_equal(np.inner(sca, arr), desired)
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(C, A.T), desired)
assert_equal(np.inner(B, C), desired)
assert_equal(np.inner(C, B), desired)
# check a matrix product
desired = np.array([[7, 10], [15, 22]], dtype=dt)
assert_equal(np.inner(A, B), desired)
# check the syrk vs. gemm paths
desired = np.array([[5, 11], [11, 25]], dtype=dt)
assert_equal(np.inner(A, A), desired)
assert_equal(np.inner(A, A.copy()), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
def test_3d_tensor(self):
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
a = np.arange(24).reshape(2,3,4).astype(dt)
b = np.arange(24, 48).reshape(2,3,4).astype(dt)
desired = np.array(
[[[[ 158, 182, 206],
[ 230, 254, 278]],
[[ 566, 654, 742],
[ 830, 918, 1006]],
[[ 974, 1126, 1278],
[1430, 1582, 1734]]],
[[[1382, 1598, 1814],
[2030, 2246, 2462]],
[[1790, 2070, 2350],
[2630, 2910, 3190]],
[[2198, 2542, 2886],
[3230, 3574, 3918]]]],
dtype=dt
)
assert_equal(np.inner(a, b), desired)
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
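# Editorial sketch of the higher-dimensional rule test_3d_tensor checks
# (standard NumPy semantics): np.inner contracts over the last axis of both
# operands, giving a result of shape a.shape[:-1] + b.shape[:-1].
def _example_inner_last_axes():
    import numpy as np
    a = np.arange(24).reshape(2, 3, 4)
    b = np.arange(24, 48).reshape(2, 3, 4)
    res = np.inner(a, b)
    assert res.shape == (2, 3, 2, 3)
    assert res[0, 0, 0, 0] == np.dot(a[0, 0], b[0, 0])  # == 158, as above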
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestAlen(TestCase):
def test_basic(self):
m = np.array([1, 2, 3])
self.assertEqual(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
self.assertEqual(np.alen(m), 2)
m = [1, 2, 3]
self.assertEqual(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
self.assertEqual(np.alen(m), 2)
def test_singleton(self):
self.assertEqual(np.alen(5), 1)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
class TestRepeat(TestCase):
def setUp(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
def test_basic(self):
A = np.repeat(self.m, [1, 3, 2, 1, 1, 2])
assert_equal(A, [1, 2, 2, 2, 3,
3, 4, 5, 6, 6])
def test_broadcast1(self):
A = np.repeat(self.m, 2)
assert_equal(A, [1, 1, 2, 2, 3, 3,
4, 4, 5, 5, 6, 6])
def test_axis_spec(self):
A = np.repeat(self.m_rect, [2, 1], axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6]])
A = np.repeat(self.m_rect, [1, 3, 2], axis=1)
assert_equal(A, [[1, 2, 2, 2, 3, 3],
[4, 5, 5, 5, 6, 6]])
def test_broadcast2(self):
A = np.repeat(self.m_rect, 2, axis=0)
assert_equal(A, [[1, 2, 3],
[1, 2, 3],
[4, 5, 6],
[4, 5, 6]])
A = np.repeat(self.m_rect, 2, axis=1)
assert_equal(A, [[1, 1, 2, 2, 3, 3],
[4, 4, 5, 5, 6, 6]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
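# Editorial note: the padding modes above are exercised through the private C
# helper test_neighborhood_iterator, but they behave much like the public
# np.pad modes -- 'constant' for zero/one/constant fill, 'wrap' for circular
# and 'symmetric' for mirror. A rough, illustrative sketch of that analogy:
def _example_pad_mode_analogy():
    import numpy as np
    x = np.array([1., 2., 3., 4., 5.])
    assert (np.pad(x, 2, mode='wrap') == [4, 5, 1, 2, 3, 4, 5, 1, 2]).all()
    assert (np.pad(x, 2, mode='symmetric') == [2, 1, 1, 2, 3, 4, 5, 5, 4]).all()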
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
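# Editorial sketch of the promotion rule TestMinScalarType checks:
# min_scalar_type picks the smallest dtype that can hold the scalar and falls
# back to object for integers outside the 64-bit range.
def _example_min_scalar_type():
    import numpy as np
    assert np.min_scalar_type(255) == np.dtype('uint8')
    assert np.min_scalar_type(-1) == np.dtype('int8')
    assert np.min_scalar_type(2**64) == np.dtype('O')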
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should also work for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included and, in aligned mode, the item
# size should match the alignment
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
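# Editorial sketch (assuming a platform where C double is 64-bit and a 32-bit
# integer maps to 'i' or 'l'): the PEP 3118 format codes parsed by
# _dtype_from_pep3118 above are the same codes an ndarray exports through
# memoryview.
def _example_pep3118_format():
    import numpy as np
    assert memoryview(np.zeros(3, dtype=np.float64)).format == 'd'
    assert memoryview(np.zeros(3, dtype='int32')).format in ('i', 'l')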
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
if HAS_REFCOUNT:
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
if HAS_REFCOUNT:
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not segfault, raise AttributeError
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
with suppress_warnings() as sup:
sup.filter(DeprecationWarning, "Assigning the 'data' attribute")
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_array_interface_itemsize():
# See gh-6361
my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
'offsets': [0, 8], 'itemsize': 16})
a = np.ones(10, dtype=my_dtype)
descr_t = np.dtype(a.__array_interface__['descr'])
typestr_t = np.dtype(a.__array_interface__['typestr'])
assert_equal(descr_t.itemsize, typestr_t.itemsize)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
        # needs to be larger than the limit of small memory cacher in ctors.c
a = np.zeros(1000)
del a
gc.collect()
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1,
0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
        # minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
    # See #7949. Don't use the "/" operator with the -3 switch, since python
    # reports it as a DeprecationWarning
if sys.version_info[0] < 3 and not sys.py3kwarning:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
def test_orderconverter_with_nonASCII_unicode_ordering():
# gh-7475
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
rquirozr/Test-Package2 | rq_test1/format_utils.py | 1 | 4992 | #from datetime import datetime
import pandas as pd
class FormatUtils(object):
__str_date_format = '%d/%m/%Y'
__dict_datetimes = {}
__dict_dateints = {}
@classmethod
def get_year(cls, period):
"""
Returns the year of a period.
period: (int) Period.
"""
return period // 100
@classmethod
def get_month(cls, period):
"""
Returns the month of a period.
period: (int) Period.
"""
return period % 100
@classmethod
def gap_in_months_for_periods(cls, period_1, period_2):
"""
        Returns the number of months missing between two periods.
period_1: (int) First period.
period_2: (int) Second period.
"""
year_1 = cls.get_year(period_1)
year_2 = cls.get_year(period_2)
month_1 = cls.get_month(period_1)
month_2 = cls.get_month(period_2)
if year_1 == year_2:
basic_difference = abs(month_1 - month_2) - 1
if basic_difference < 0:
basic_difference = 0
return basic_difference
elif year_1 > year_2:
greater_year_dif = month_1
smaller_year_dif = 12 - month_2
basic_difference = greater_year_dif + smaller_year_dif
additional_months_difference = (year_1 - year_2 - 1) * 12
return basic_difference + additional_months_difference - 1
elif year_1 < year_2:
greater_year_dif = month_2
smaller_year_dif = 12 - month_1
basic_difference = greater_year_dif + smaller_year_dif
additional_months_difference = (year_2 - year_1 - 1) * 12
return basic_difference + additional_months_difference - 1
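    # Hedged worked example (not in the original module), assuming periods are
    # encoded as YYYYMM integers: gap_in_months_for_periods(201703, 201701)
    # returns 1 (February 2017 is missing), while adjacent periods such as
    # 201701 and 201612 return 0.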
@classmethod
def get_difference_in_months(cls, datetime1, datetime2, dif_base_on_days = False):
"""
        Returns the difference in months (int) between two datetimes:
datetime1: (datetime) First datetime.
datetime2: (datetime) Second datetime.
        dif_base_on_days: If True, base the difference on the exact dates; \
        otherwise use only the months and years of the dates.
"""
if dif_base_on_days:
difference = datetime1 - datetime2
difference = abs(difference)
return difference.days//30
else:
dif_years = datetime1.year - datetime2.year
dif_months = datetime1.month - datetime2.month
return abs(dif_years*12 + dif_months)
@classmethod
def date_to_integer(cls, raw_date, format_date = None, format_integer = '%Y%m'):
"""
        Return a date as a period in the given format (YYYYMM by default).
raw_date: Date to convert.
format_date: (str pattern) Format of the raw_date. Default -> automatic detection.
        format_integer: (str pattern) Format required for the conversion (YYYYMM by default).
"""
# if raw_date in cls.__dict_dateints:
# return cls.__dict_dateints[raw_date]
# else:
# datetime_obj = datetime.strptime(raw_date, cls.__str_date_format)
# if datetime_obj.month > 9:
# current_month = datetime_obj.month
# else:
# current_month = '0'+str(datetime_obj.month)
#
# formatted_date = '{year}{month}'.format(year=datetime_obj.year,
# month=current_month)
# integer_date = int(formatted_date)
# cls.__dict_dateints[raw_date] = integer_date
# return integer_date
return pd.to_datetime(raw_date, format=format_date).strftime(format_integer)
@classmethod
def make_datetime(cls, string_series, format=None, dayfirst=True):
"""
        Return a datetime (or series of datetimes) parsed according to a pattern.
string_series: (str,series) Object to convert.
format: (str pattern) Format of the object.
        dayfirst: Does the string_series have the day first?
"""
if isinstance(string_series, str):
if len(string_series) == 6:
string_series = str(string_series) + '01'
return pd.to_datetime(string_series, format=format, dayfirst=dayfirst)
else:
if len(string_series[0]) == 6:
string_series = string_series.map(lambda x: str(x) + '01')
dates = {date:pd.to_datetime(date, format=format, dayfirst=dayfirst) for date in string_series.unique()}
return string_series.map(dates)
# @classmethod
# def get_datetime(cls, raw_date):
# if raw_date in cls.__dict_datetimes:
# return cls.__dict_datetimes[raw_date]
# else:
# new_datetime = datetime.strptime(raw_date, cls.__str_date_format)
# cls.__dict_datetimes[raw_date] = new_datetime
# return new_datetime
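# Minimal usage sketch added for illustration; the date strings below are
# hypothetical and only pandas (imported above) is assumed.
if __name__ == '__main__':
    # Month gap between two YYYYMM periods -> 1 (February 2017 is missing).
    print(FormatUtils.gap_in_months_for_periods(201703, 201701))
    # Parse a single DD/MM/YYYY date string into a pandas Timestamp.
    print(FormatUtils.make_datetime('05/03/2017', format='%d/%m/%Y'))
    # Period representation of a date; note the current implementation
    # returns the formatted string '201703' rather than an int.
    print(FormatUtils.date_to_integer('05/03/2017', format_date='%d/%m/%Y'))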
| bsd-3-clause |
ldirer/scikit-learn | examples/plot_missing_values.py | 35 | 3059 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the estimator get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
ShaperTools/openhtf | examples/measurements.py | 2 | 7993 | # Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example OpenHTF test demonstrating use of measurements.
Run with (your virtualenv must be activated first):
python measurements.py
Afterwards, check out the output in measurements.json. If you open both this
example test and that output file and compare them, you should be able to see
where measurement values end up in the output and what the corresponding code
looks like that sets them.
TODO(someone): Write these examples.
For more complex topics, see the validators.py and dimensions.py examples.
For a simpler example, see the hello_world.py example. If the output of this
test is confusing, start with the hello_world.py output and compare it to this
test's output.
Some constraints on measurements:
- Measurement names must be valid python variable names. This is mostly for
sanity, but also ensures you can access them via attribute access in phases.
This applies *after* any with_args() substitution (not covered in this
tutorial, see the phases.py example for more details).
- You cannot declare the same measurement name multiple times on the same
phase. Technically, you *can* declare the same measurement on multiple
phases; measurements are attached to a specific phase in the output. This
isn't recommended, though, because it makes it difficult to flatten a test's
measurements, which some output formats require.
"""
# Import openhtf with an abbreviated name, as we'll be using a bunch of stuff
# from it throughout our test scripts. See __all__ at the top of
# openhtf/__init__.py for details on what's in top-of-module namespace.
import random
import openhtf as htf
# Import this output mechanism as it's the specific one we want to use.
from openhtf.output.callbacks import json_factory
# You won't normally need to import this, see validators.py example for
# more details. It's used for the inline measurement declaration example
# below, but normally you'll only import it when you want to define custom
# measurement validators.
from openhtf.util import validators
# Simple example of measurement use, similar to hello_world.py usage.
@htf.measures(htf.Measurement('hello_world_measurement'))
def hello_phase(test):
test.measurements.hello_world_measurement = 'Hello!'
# An alternative simpler syntax that creates the Measurement for you.
@htf.measures('hello_again_measurement')
def again_phase(test):
test.measurements.hello_again_measurement = 'Again!'
# Multiple measurements can be specified in a single decorator, using either of
# the above syntaxes. Technically, these syntaxes can be mixed and matched, but
# as a matter of convention you should always use one or the other within a
# single decorator call. You'll also note that you can stack multiple
# decorations on a single phase. This is useful if you have a handful of simple
# measurements, and then one or two with more complex declarations (see below).
@htf.measures('first_measurement', 'second_measurement')
@htf.measures(htf.Measurement('third'), htf.Measurement('fourth'))
def lots_of_measurements(test):
test.measurements.first_measurement = 'First!'
# Measurements can also be access via indexing rather than attributes.
test.measurements['second_measurement'] = 'Second :('
# This can be handy for iterating over measurements.
for measurement in ('third', 'fourth'):
test.measurements[measurement] = measurement + ' is the best!'
# Basic key/value measurements are handy, but we may also want to validate a
# measurement against some criteria, or specify additional information
# describing the measurement. Validators can get quite complex, for more
# details, see the validators.py example.
@htf.measures(htf.Measurement('validated_measurement').in_range(0, 10).doc(
'This measurement is validated.').with_units(htf.units.SECOND))
def measure_seconds(test):
# The 'outcome' of this measurement in the test_record result will be a PASS
# because its value passes the validator specified (0 <= 5 <= 10).
test.measurements.validated_measurement = 5
# These additional attributes can also be specified inline as kwargs passed
# directly to the @measures decorator. If you do so, however, you must
# specify exactly one measurement with that decorator (ie. the first argument
# must be a string containing the measurement name). If you want to specify
# multiple measurements this way, you can stack multiple decorators.
@htf.measures('inline_kwargs', docstring='This measurement is declared inline!',
units=htf.units.HERTZ, validators=[validators.in_range(0, 10)])
@htf.measures('another_inline', docstring='Because why not?')
def inline_phase(test):
# This measurement will have an outcome of FAIL, because the set value of 15
# will not pass the 0 <= x <= 10 validator.
test.measurements.inline_kwargs = 15
test.measurements.another_inline = 'This one is unvalidated.'
# Let's log a message so the operator knows the test should fail.
test.logger.info('Set inline_kwargs to a failing value, test should FAIL!')
# A multidim measurement including how to convert to a pandas dataframe and
# a numpy array.
@htf.measures(htf.Measurement('power_time_series')
.with_dimensions('ms', 'V', 'A'))
@htf.measures(htf.Measurement('average_voltage').with_units('V'))
@htf.measures(htf.Measurement('average_current').with_units('A'))
@htf.measures(htf.Measurement('resistance').with_units('ohm').in_range(9, 11))
def multdim_measurements(test):
# Create some fake current and voltage over time data
for t in range(10):
resistance = 10
voltage = 10 + 10.0*t
current = voltage/resistance + .01*random.random()
dimensions = (t, voltage, current)
test.measurements['power_time_series'][dimensions] = 0
# When accessing your multi-dim measurement a DimensionedMeasuredValue
# is returned.
dim_measured_value = test.measurements['power_time_series']
# Let's convert that to a pandas dataframe
power_df = dim_measured_value.to_dataframe(columns=['ms', 'V', 'A', 'n/a'])
test.logger.info('This is what a dataframe looks like:\n%s', power_df)
test.measurements['average_voltage'] = power_df['V'].mean()
# We can convert the dataframe to a numpy array as well
power_array = power_df.as_matrix()
test.logger.info('This is the same data in a numpy array:\n%s', power_array)
test.measurements['average_current'] = power_array.mean(axis=0)[2]
# Finally, let's estimate the resistance
test.measurements['resistance'] = (
test.measurements['average_voltage'] /
test.measurements['average_current'])
if __name__ == '__main__':
# We instantiate our OpenHTF test with the phases we want to run as args.
test = htf.Test(hello_phase, again_phase, lots_of_measurements,
measure_seconds, inline_phase, multdim_measurements)
# In order to view the result of the test, we have to output it somewhere,
# and a local JSON file is a convenient way to do this. Custom output
# mechanisms can be implemented, but for now we'll just keep it simple.
# This will always output to the same ./measurements.json file, formatted
# slightly for human readability.
test.add_output_callbacks(
json_factory.OutputToJSON('./measurements.json', indent=2))
# Unlike hello_world.py, where we prompt for a DUT ID, here we'll just
# use an arbitrary one.
test.execute(test_start=lambda: 'MyDutId')
| apache-2.0 |
iagapov/ocelot | adaptors/csrtrack2ocelot.py | 1 | 3516 |
import numpy as np
from ocelot.common.globals import *
from ocelot.cpbd.beam import ParticleArray
from scipy import interpolate
def FindRefParticle(PD, col, weight, m=6):
for i in range(0, m):
x0 = np.mean(PD[:, i])
PD[:, i] = PD[:, i] - x0
sig0 = np.std(PD[:, i])
PD[:, i] = np.abs(PD[:, i])
if sig0>0:
PD[:, i] = PD[:, i]/sig0
PD[:, col] = PD[:, col]*weight
    # numpy equivalent of MATLAB's [r, i0] = min(sum(PD')): distance of every
    # particle from the normalized centre; the closest one becomes the reference.
    row_sums = np.sum(PD, axis=1)
    i0 = np.argmin(row_sums)
    r = row_sums[i0]
return i0, r
def sortrows(x, col):
return x[x[:, col].argsort()]
def SaveAstraParticles(outfile, PD, Q, findref=False, z0=None):
#%SaveAstraParticles(outfile,PD,Q,findref,z0)
n = len(PD[:,0])
if z0 == None:
findref=False
if findref:
i0, r = FindRefParticle(PD, 3, 10)
P0 = PD[0,:]
PD[0, :] = PD[i0, :]
PD[i0, :] = P0
        PD[1:n, :] = sortrows(PD[1:n, :], 3)
PD[1:n, 3-1] = PD[1:n, 3-1] - PD[0, 3-1]
PD[1:n, 6-1] = PD[1:n, 6-1] - PD[0, 6-1] #substract reference particle
PD1 = np.zeros((n, 10))
PD1[:, 0:6] = PD[:, 0:6]
PD1[:, 7] = -Q/n
PD1[:, 8] = 1
PD1[:, 9] = 5
if z0 !=None:
PD1[0,2] = z0
np.save(outfile,PD1)
def load_Astra_particles(filename):
PD = np.loadtxt(filename)
n = len(PD[:, 0])
Q = np.abs(np.sum(PD[:,7]))
PD[1:n, 2] = PD[1:n, 2] + PD[0, 2]
    PD[1:n, 5] = PD[1:n, 5] + PD[0, 5] # add reference particle
PD1 = PD[:, 0:6]
return PD1, Q
def csrtrackBeam2particleArray(filename, orient="H"):
#H z x y pz px py -> x y z px py pz
#V z y x pz py px -> x y -z px py -pz
PD = np.loadtxt(filename)
#PD = load(infile)
n = np.shape(PD)[0] - 1 #length(PD(:,1))- 1
#print(n)
PD1 = np.zeros((n, 6))
Q = np.sum(PD[1:, 6])*1e9
t0 = PD[0, 0]
if orient=='H':
PD1[:, 1-1] = PD[1:, 2-1]
PD1[:, 2-1] = PD[1:, 3-1]
PD1[:, 3-1] = PD[1:, 1-1]
PD1[:, 4-1] = PD[1:, 5-1]
PD1[:, 5-1] = PD[1:, 6-1]
PD1[:, 6-1] = PD[1:, 4-1]
else:
PD1[:, 1-1] = -PD[1:, 3-1]
PD1[:, 2-1] = PD[1:, 2-1]
PD1[:, 3-1] = PD[1:, 1-1]
PD1[:, 4-1] = -PD[1:, 6-1]
PD1[:, 5-1] = PD[1:, 5-1]
PD1[:, 6-1] = PD[1:, 4-1]
#print("CSR", PD1[0, :])
for i in range(6):
PD1[1:n, i] = PD1[1:n, i] + PD1[0, i]
p_ref = np.sqrt(PD1[0, 3]**2 + PD1[0, 4]**2 + PD1[0, 5]**2)
px = PD1[:, 3]/ p_ref
py = PD1[:, 4] / p_ref
Eref = np.sqrt(m_e_eV ** 2 + p_ref ** 2)
pe = (np.sqrt(m_e_eV**2 + (PD1[:, 3]**2 + PD1[:, 4]**2 + PD1[:, 5]**2)) - Eref) / p_ref
p_array = ParticleArray(n)
p_array.rparticles[0] = PD1[:, 0] - 0*PD1[0, 0]
p_array.rparticles[2] = PD1[:, 1] - 0*PD1[0, 1]
p_array.rparticles[4] = -(PD1[:, 2] - PD1[0, 2])
p_array.rparticles[1] = px[:]
p_array.rparticles[3] = py[:]
p_array.rparticles[5] = pe[:]
p_array.q_array[:] = PD[1:, 6]
p_array.s = PD1[0, 2]
p_array.E = Eref*1e-9
return p_array
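# Hedged usage sketch (not part of the original adaptor): converting a CSRtrack
# "fmt1" monitor dump into an Ocelot ParticleArray. The file path below is
# hypothetical, and orient="H" assumes a horizontally oriented beam.
def example_read_csrtrack_dump(filename="out/out.fmt1"):
    p_array = csrtrackBeam2particleArray(filename, orient="H")
    print("reference energy [GeV]: {}".format(p_array.E))
    print("number of particles: {}".format(len(p_array.q_array)))
    return p_array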
#def xyz2ParticleArray():
if __name__ == "__main__":
import matplotlib.pyplot as plt
filename = "C:\\Users\\tomins\\Documents\\Dropbox\\DESY\\repository\\For_Sergey\\test_ocelot_bc2\\N5_BC3\\out\\out.fmt1"
pd, Q, t = load_CSRtrack_particles(filename, orient="H")
sig_2 = np.std(pd[:, 2])
print(sig_2*0.003)
pd[:, 2] = pd[:, 2] - np.mean(pd[:, 2])
B = s_to_cur(pd[:, 2], 0.003*sig_2, Q*1e-9, v=299792458)
print(B[:10, 0])
print(B[:10, 1])
plt.plot(B[:,0],B[:, 1],'b')
plt.show() | gpl-3.0 |
trashkalmar/omim | tools/python/booking_hotels_quality.py | 20 | 2632 | #!/usr/bin/env python
# coding: utf8
from __future__ import print_function
from collections import namedtuple, defaultdict
from datetime import datetime
from sklearn import metrics
import argparse
import base64
import json
import logging
import matplotlib.pyplot as plt
import os
import pickle
import time
import urllib2
import re
# init logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s: %(message)s')
def load_binary_list(path):
"""Loads reference binary classifier output. """
bits = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
bits.append(1 if line.startswith('y') else 0)
return bits
def load_score_list(path):
"""Loads list of matching scores. """
scores = []
with open(path, 'r') as fd:
for line in fd:
if (not line.strip()) or line.startswith('#'):
continue
scores.append(float(re.search(r'result score: (\d*\.\d+)', line).group(1)))
return scores
def process_options():
    parser = argparse.ArgumentParser(
        description="Estimate the quality of hotel matching from a reference "
                    "label list and a list of matching scores.")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose")
parser.add_argument("-q", "--quiet", action="store_false", dest="verbose")
parser.add_argument("--reference_list", dest="reference_list", help="Path to data files")
parser.add_argument("--sample_list", dest="sample_list", help="Name and destination for output file")
parser.add_argument("--show", dest="show", default=False, action="store_true",
help="Show graph for precision and recall")
options = parser.parse_args()
if not options.reference_list or not options.sample_list:
parser.print_help()
exit()
return options
def main():
options = process_options()
reference = load_binary_list(options.reference_list)
sample = load_score_list(options.sample_list)
precision, recall, threshold = metrics.precision_recall_curve(reference, sample)
aa = zip(precision, recall, threshold)
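    # p*r/(p+r) is half of the F1 score, so maximising it picks the threshold
    # with the best harmonic mean of precision and recall.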
max_by_hmean = max(aa, key=lambda (p, r, t): p*r/(p+r))
print("Optimal threshold: {2} for precision: {0} and recall: {1}".format(*max_by_hmean))
print("AUC: {0}".format(metrics.roc_auc_score(reference, sample)))
if options.show:
plt.plot(recall, precision)
plt.title("Precision/Recall")
plt.ylabel("Precision")
plt.xlabel("Recall")
plt.show()
if __name__ == "__main__":
main()
| apache-2.0 |
b-cell-immunology/sciReptor_analysis | python/region_lengths.py | 1 | 3478 | # -*- coding: utf-8 -*-
import igdb_plotting as igplt
import numpy as np
import MySQLdb as mysql
import matplotlib.pyplot as plt
import sys
import argparse
import igdb_queries as igdbq
parser = argparse.ArgumentParser()
parser.add_argument("event_infile",
type = str,
help="File containing different SQL queries yielding a list of event_ids.")
parser.add_argument("-d", "--database",
type = str,
help="optional manual input of database, otherwise taken from config")
parser.add_argument("-n", "--normalize",
help="normalization to total count",
action="store_true")
parser.add_argument("-r", "--region", type=str,
help="Region of interest. Thought for CDR3, but also others can be plotted.",
choices=['CDR1','CDR2','CDR3','FR1','FR2','FR3'])
parser.add_argument("-l", "--locus", type=str,
help="locus H, K, L",
choices=['H','K','L'])
parser.add_argument("-c", "--cumulative",
help="Show cumulative frequencies",
action="store_true")
parser.add_argument("-o", "--outputdir", type=str,
help="directory for pdf output")
args = parser.parse_args()
db = args.database
# connect to database via ~/.my.conf settings
connection = mysql.connect(db=db,read_default_file="~/.my.cnf", read_default_group='mysql_igdb')
cursor = connection.cursor()
# generate event list. Will later be generated by another program and picked up via pickle (or called as a module).
event_names, event_statements = igdbq.read_eventfile(args.event_infile, db)
def get_region_lengths (event_statement):
region_statement = "SELECT COUNT(DISTINCT sequences.seq_id) as cnt, prot_length \
FROM %s.CDR_FWR \
JOIN %s.sequences ON sequences.seq_id = CDR_FWR.seq_id \
AND sequences.consensus_rank = 1 \
JOIN %s.event ON event.event_id = sequences.event_id \
WHERE CDR_FWR.region = '%s' and sequences.locus = '%s' \
AND event.event_id IN (%s) \
GROUP BY prot_length \
ORDER BY prot_length ASC" % (db, db, db, args.region, args.locus, event_statement)
cursor.execute(region_statement)
length_rows = cursor.fetchall()
length_heights = []
lengths = []
for length in length_rows:
length_heights.append(length[0])
lengths.append(length[1])
return length_heights, lengths
#plt.figure()
for event_name, event_statement in zip(event_names, event_statements):
heights,lengths = get_region_lengths(event_statement)
if args.normalize:
heights = [height/float(sum(heights)) for height in heights]
if args.cumulative:
        heights = [sum(heights[:i+1]) for i in range(len(heights))]
plt.plot(lengths, heights, label = event_name)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlabel("Length of " + args.region + " region in amino acids")
if args.cumulative:
title = "Cumulative frequency"
else:
title = "Observed frequency"
if args.normalize:
title = title + " (normalized)"
plt.ylabel(title)
ttl = plt.title(igplt.plot_log(args.region + ' region length distribution', sys.argv, db))
plt.savefig(args.outputdir + "/%s_%s_%s_%s" % (args.event_infile, args.locus, args.region, title) + '.pdf', bbox_extra_artists=(ttl,), bbox_inches='tight')
| agpl-3.0 |
djgagne/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
    # reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
apcooper/bright_analysis | py/bright_analysis/plots/skyplot.py | 1 | 2328 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import time
import sys
import healpy as hp
import desiutil.plots as desiplot
import desitarget.io
import astropy.io.fits as fits
import numpy as np
import matplotlib.pyplot as pl
from astropy.table import Table
from matplotlib import rcParams
rcParams['font.family'] = 'monospace'
from bright_analysis.sweeps.io import prepare_sweep_data
############################################################
def ra_dec_ang2pix(nside,ra,dec,nest=False):
"""
"""
theta = (dec+90.0)*np.pi/180.0
phi = ra*np.pi/180.0
return hp.ang2pix(nside,theta,phi,nest=nest)
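############################################################
def _demo_ra_dec_ang2pix():
    """Illustrative sketch (not called anywhere in this module): map a few
    RA/Dec positions to HEALPix pixel indices. nside=64 is an arbitrary choice.
    """
    nside = 64
    ra = np.array([0.0, 90.0, 180.0])
    dec = np.array([-45.0, 0.0, 45.0])
    pix = ra_dec_ang2pix(nside, ra, dec)
    return list(zip(ra, dec, pix))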
############################################################
def plot_epoch_summary(sweep_root_dir,data=None,epoch=0,
lgrid_deg=1,
filetype='observed',
savepath=None):
"""
"""
if savepath is not None:
        assert os.path.splitext(savepath)[-1] in ['.png', '.pdf'], 'savepath must end in .png or .pdf'
# Load the data if not passed directly
data = prepare_sweep_data(sweep_root_dir,data,epoch,filetype=filetype)
    # np.linspace needs an integer sample count; +1 makes the bins exactly
    # lgrid_deg wide so the per-square-degree normalisation below is exact.
    ra_edges = np.linspace(0, 360, int(360.0/lgrid_deg) + 1)
    dec_edges = np.linspace(-90, 90, int(180.0/lgrid_deg) + 1)
grid, xbin, ybin = np.histogram2d(data['RA'],data['DEC'],bins=(ra_edges,dec_edges))
# Units of density are stars per square degree
grid = grid/(lgrid_deg**2)
figure = pl.figure(figsize=(9,7))
desiplot.plot_grid_map(grid.T,ra_edges,dec_edges,label='Epoch %d, N stars per sq. deg.'%(epoch))
pl.draw()
if savepath is not None:
pl.savefig(savepath,bbox_inches='tight',pad_inches=0.1)
print('Saved figure to {}'.format(savepath))
return
############################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('sweep_root_dir')
parser.add_argument('-e','--epoch',default=0, type=int)
parser.add_argument('-l','--lgrid',default=1.0,type=float)
parser.add_argument('-s','--savepath',default=None)
args = parser.parse_args()
plot_epoch_summary(args.sweep_root_dir,epoch=args.epoch,
lgrid_deg=args.lgrid,
savepath=args.savepath)
| bsd-3-clause |
ClimbsRocks/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            # compare successive iterates, mirroring the dense branch below
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
        self.rows_ = np.vstack([self.row_labels_ == c
                                for c in range(self.n_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == c
                                   for c in range(self.n_clusters)])
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
        self.rows_ = np.vstack([self.row_labels_ == label
                                for label in range(n_row_clusters)
                                for _ in range(n_col_clusters)])
        self.columns_ = np.vstack([self.column_labels_ == label
                                   for _ in range(n_row_clusters)
                                   for label in range(n_col_clusters)])
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
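if __name__ == '__main__':
    # Rough self-contained check, not part of the library API: recover planted
    # co-clusters on synthetic data. ``make_biclusters`` and ``consensus_score``
    # are assumed importable from the installed scikit-learn; the shape, noise
    # level and seed below are arbitrary.
    from sklearn.datasets import make_biclusters
    from sklearn.metrics import consensus_score
    data, rows, columns = make_biclusters(shape=(300, 300), n_clusters=5,
                                          noise=5, shuffle=True,
                                          random_state=0)
    model = SpectralCoclustering(n_clusters=5, random_state=0)
    model.fit(data)
    print("consensus score: {:.3f}".format(
        consensus_score(model.biclusters_, (rows, columns))))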
| bsd-3-clause |
hmendozap/auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_nystroem_sampler.py | 1 | 4484 | import unittest
import numpy as np
import sklearn.preprocessing
from autosklearn.pipeline.components.feature_preprocessing.nystroem_sampler import \
Nystroem
from autosklearn.pipeline.util import _test_preprocessing, get_dataset
class NystroemComponentTest(unittest.TestCase):
def test_default_configuration(self):
transformation, original = _test_preprocessing(Nystroem)
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], 100)
self.assertFalse((transformation == 0).all())
        # Custom preprocessing test to check if clipping to zero works
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
original_X_train = X_train.copy()
ss = sklearn.preprocessing.StandardScaler()
X_train = ss.fit_transform(X_train)
configuration_space = Nystroem.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Nystroem(random_state=1,
**{hp_name: default[hp_name]
for hp_name in default
if default[hp_name] is not None})
transformer = preprocessor.fit(X_train, Y_train)
transformation, original = transformer.transform(
X_train), original_X_train
self.assertEqual(transformation.shape[0], original.shape[0])
self.assertEqual(transformation.shape[1], 100)
#@unittest.skip("Right now, the RBFSampler returns a float64 array!")
def _test_preprocessing_dtype(self):
# Dense
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset("iris")
self.assertEqual(X_train.dtype, np.float32)
configuration_space = Nystroem.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Nystroem(random_state=1,
**{hp.hyperparameter.name: hp.value
for hp
in
default.values.values()})
preprocessor.fit(X_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset("iris")
X_train = X_train.astype(np.float64)
configuration_space = Nystroem.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Nystroem(random_state=1,
**{hp.hyperparameter.name: hp.value
for hp
in
default.values.values()})
preprocessor.fit(X_train, Y_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
# Sparse
# np.float32
X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
self.assertEqual(X_train.dtype, np.float32)
configuration_space = Nystroem.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Nystroem(random_state=1,
**{hp.hyperparameter.name: hp.value
for hp
in
default.values.values()})
preprocessor.fit(X_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float32)
# np.float64
X_train, Y_train, X_test, Y_test = get_dataset("iris", make_sparse=True)
X_train = X_train.astype(np.float64)
configuration_space = Nystroem.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
preprocessor = Nystroem(random_state=1,
**{hp.hyperparameter.name: hp.value
for hp
in
default.values.values()})
preprocessor.fit(X_train)
Xt = preprocessor.transform(X_train)
self.assertEqual(Xt.dtype, np.float64)
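def _nystroem_vs_exact_rbf_sketch():
    # Standalone sketch (not a unit test): compare the Nystroem feature map
    # against the exact RBF kernel it approximates. The underlying
    # ``sklearn.kernel_approximation.Nystroem`` is what the component above
    # wraps; the matrix size, gamma and n_components here are arbitrary.
    from sklearn.kernel_approximation import Nystroem as SklearnNystroem
    from sklearn.metrics.pairwise import rbf_kernel
    rng = np.random.RandomState(0)
    X = rng.rand(60, 5)
    feature_map = SklearnNystroem(kernel='rbf', gamma=0.5, n_components=30,
                                  random_state=0)
    Z = feature_map.fit_transform(X)
    # the Gram matrix of the mapped data should be close to the exact kernel
    return np.abs(Z.dot(Z.T) - rbf_kernel(X, gamma=0.5)).max()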
| bsd-3-clause |
elkingtonmcb/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
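def _reference_cv_scores():
    # Sketch of how the cross-validation scores quoted in the module docstring
    # could be reproduced; it is not executed by this example. The import path
    # assumes scikit-learn >= 0.18 (``model_selection``), and the exact numbers
    # will vary with the library version.
    from sklearn.model_selection import cross_val_score
    data = load_iris()
    candidates = [ExtraTreesClassifier(n_estimators=30),
                  RandomForestClassifier(n_estimators=30),
                  AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                                     n_estimators=30),
                  DecisionTreeClassifier(max_depth=None)]
    for estimator in candidates:
        scores = cross_val_score(estimator, data.data, data.target, cv=10)
        print("%s: %.2f" % (type(estimator).__name__, scores.mean()))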
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
sherazkasi/SabreSoftware | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 20*np.pi, 500)
>>> plt.plot(x, sp.signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
Duty cycle.
Returns
-------
y : array_like
The output square wave.
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3,-1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
    if isinstance(t, str) and t == 'cutoff':  # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
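def _demo_gausspulse():
    # Usage sketch, not part of the public scipy API: size the time axis with
    # the 'cutoff' call, then evaluate the pulse. fc, bw and tpr below are
    # arbitrary illustrative values.
    import numpy as np
    tc = gausspulse('cutoff', fc=5, bw=0.5, tpr=-40)
    t = np.linspace(-tc, tc, 2001)
    yI, yQ, yenv = gausspulse(t, fc=5, bw=0.5, retquad=True, retenv=True)
    # the carrier is bounded by the envelope by construction
    assert np.all(np.abs(yI) <= yenv + 1e-12)
    return yI, yQ, yenv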
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
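def _demo_linear_chirp():
    # Minimal usage sketch, not part of the public scipy API: a linear sweep
    # from 6 Hz down to 1 Hz over 10 s, checked against the closed form
    # f(t) = f0 + (f1 - f0) * t / t1 given in the docstring. The sampling rate
    # is an arbitrary choice.
    import numpy as np
    fs = 500.0
    t = np.arange(0, 10, 1 / fs)
    w = chirp(t, f0=6, t1=10, f1=1, method='linear')
    # finite-difference the phase to recover the instantaneous frequency
    f_inst = np.diff(_chirp_phase(t, 6, 10, 1, method='linear')) / (2 * pi) * fs
    t_mid = t[:-1] + 0.5 / fs
    assert np.allclose(f_inst, 6 + (1 - 6) * t_mid / 10)
    return w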
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
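def _demo_sweep_poly():
    # Small sketch, not part of the public scipy API: a quadratic frequency
    # law f(t) = 0.05*t**2 - 0.75*t + 3 over 10 s; the coefficients are
    # arbitrary illustrative values.
    import numpy as np
    p = np.poly1d([0.05, -0.75, 3.0])
    t = np.linspace(0, 10, 5001)
    w = sweep_poly(t, p)
    # the phase integrates 2*pi*f(t), so differencing it recovers f(t)
    dt = t[1] - t[0]
    f_inst = np.diff(_sweep_poly_phase(t, p)) / (2 * pi) / dt
    assert np.allclose(f_inst, p(t[:-1] + 0.5 * dt), atol=1e-6)
    return w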
| gpl-3.0 |
stonebig/bokeh | sphinx/source/docs/user_guide/examples/extensions_example_latex.py | 1 | 3439 | """ The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
import numpy as np
from bokeh.models import Label
from bokeh.plotting import figure, show
from bokeh.util.compiler import TypeScript
TS_CODE = """
import * as p from "core/properties"
import {Label, LabelView} from "models/annotations/label"
declare const katex: any
export class LatexLabelView extends LabelView {
model: LatexLabel
render(): void {
//--- Start of copied section from ``Label.render`` implementation
// Here because AngleSpec does units tranform and label doesn't support specs
let angle: number
switch (this.model.angle_units) {
case "rad": {
angle = -this.model.angle
break
}
case "deg": {
angle = (-this.model.angle * Math.PI) / 180.0
break
}
default:
throw new Error("unreachable code")
}
const panel = this.panel != null ? this.panel : this.plot_view.frame
const xscale = this.plot_view.frame.xscales[this.model.x_range_name]
const yscale = this.plot_view.frame.yscales[this.model.y_range_name]
let sx = this.model.x_units == "data" ? xscale.compute(this.model.x) : panel.xview.compute(this.model.x)
let sy = this.model.y_units == "data" ? yscale.compute(this.model.y) : panel.yview.compute(this.model.y)
sx += this.model.x_offset
sy -= this.model.y_offset
//--- End of copied section from ``Label.render`` implementation
// Must render as superpositioned div (not on canvas) so that KaTex
// css can properly style the text
this._css_text(this.plot_view.canvas_view.ctx, "", sx, sy, angle)
// ``katex`` is loaded into the global window at runtime
// katex.renderToString returns a html ``span`` element
katex.render(this.model.text, this.el, {displayMode: true})
}
}
export namespace LatexLabel {
export type Attrs = p.AttrsOf<Props>
export type Props = Label.Props
}
export interface LatexLabel extends LatexLabel.Attrs {}
export class LatexLabel extends Label {
properties: LatexLabel.Props
constructor(attrs?: Partial<LatexLabel.Attrs>) {
super(attrs)
}
static initClass() {
this.prototype.type = 'LatexLabel'
this.prototype.default_view = LatexLabelView
}
}
LatexLabel.initClass()
"""
class LatexLabel(Label):
"""A subclass of the Bokeh built-in `Label` that supports rendering
LaTex using the KaTex typesetting library.
Only the render method of LabelView is overloaded to perform the
    text -> latex (via katex) conversion. Note: ``render_mode="canvas"``
    isn't supported and certain DOM manipulation happens in the Label
    superclass implementation that requires explicitly setting
    ``render_mode='css'``.
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.6.0/katex.min.css"]
__implementation__ = TypeScript(TS_CODE)
x = np.arange(0.0, 1.0 + 0.01, 0.01)
y = np.cos(2 * 2 * np.pi * x) + 2
p = figure(title="LaTex Demonstration", plot_width=500, plot_height=500)
p.line(x, y)
# Note: must set ``render_mode="css"``
latex = LatexLabel(text=r"f = \sum_{n=1}^\infty\frac{-e^{i\pi}}{2^n}!",
x=40, y=420, x_units='screen', y_units='screen',
render_mode='css', text_font_size='16pt',
background_fill_alpha=0)
p.add_layout(latex)
show(p)
| bsd-3-clause |
dalejung/ts-charting | ts_charting/plot_3d.py | 1 | 1275 | import pandas as pd
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pylab import plt
def grab_first_unique(index):
"""
There are instances where you are subsetting your data and end up with a MultiIndex where
one level is constant. This function grabs the first (and hopefully) unique level and returns it.
    This is so you can plot a DataFrame that has the correct format but might have an extraneous index level
"""
if isinstance(index, pd.MultiIndex):
for i in range(index.nlevels):
ind = index.get_level_values(i)
if ind.is_unique:
return ind
return index
def _3d_values(df):
# grab the first non-unique index
index = grab_first_unique(df.index)
columns = grab_first_unique(df.columns)
X, Y = np.meshgrid(index, columns)
    Z = df.values.T  # transpose to match the meshgrid orientation (len(columns), len(index))
return {'values': (X, Y, Z), 'labels': (index.name, columns.name)}
def plot_wireframe(df, ax=None, *args, **kwargs):
if ax is None:
fig = plt.figure()
ax = Axes3D(fig)
res = _3d_values(df)
X, Y, Z = res['values']
x_name, y_name = res['labels']
ax.plot_wireframe(X, Y, Z, *args, **kwargs)
ax.set_xlabel(x_name)
ax.set_ylabel(y_name)
return ax
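if __name__ == '__main__':
    # Quick visual check with a small synthetic DataFrame; the axis names and
    # sizes are arbitrary, and a square grid is used purely for illustration.
    index = pd.Index(np.arange(8), name='x')
    columns = pd.Index(np.arange(10, 18), name='y')
    demo_df = pd.DataFrame(np.random.randn(8, 8), index=index, columns=columns)
    plot_wireframe(demo_df)
    plt.show()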
| mit |
kensugino/jGEM | jgem/go/parser.py | 1 | 14033 | """ Copyright (c) 2004, Ken Sugino
GO (Gene Ontology) related stuffs.
"""
import os
import re
try:
from cPickle import load, dump # < v3
except:
from pickle import load, dump # > v3
try:
from itertools import izip
except:
izip = zip
import pandas as PD
# class Tree(object):
# def __init__(self,rootid=None,parents={}):
# self.rootid = rootid # id is index
# self.parents = parents # id(idx) => parent id(idx) dict
# self.children = children # id(idx) => [child id(idx), ...] list
class GOSlim(object):
pbin = ['DNA metabolism',
'RNA metabolism',
'cell adhesion',
'cell cycle and proliferation',
'cell organization and biogenesis',
'cell-cell signaling',
'death',
'developmental processes',
'other biological processes',
'other metabolic processes',
'protein metabolism',
'signal transduction',
'stress response',
'transport']
mbin = ['bone, tooth or skin structural activity',
'chaperone-related activity',
'cytoskeletal activity',
'enzyme regulator activity',
'extracellular structural activity',
'kinase activity',
'nucleic acid binding activity',
'other molecular function',
'signal transduction activity',
'transcription regulatory activity',
'translation activity',
'transporter activity']
cbin = ['ER/Golgi',
'cytoskeleton',
'cytosol',
'extracellular matrix',
'mitochondrion',
'non-structural extracellular',
'nucleus',
'other cellular component',
'other cytoplasmic organelle',
'other membranes',
'plasma membrane',
'translational apparatus']
def __init__(self, fname='map2MGIslim.txt'):
self.fname = fname
self.df = df = PD.read_table(fname, header=0) # 40809 entries as of 2014-01-01
# self.s2e = s2e = {s:i for i,s in enumerate(self.pbin+self.mbin+self.cbin)}
# self.e2s = e2s = {i:s for i,s in enumerate(self.pbin+self.mbin+self.cbin)}
go2slim = {}
for w, a in [('gob','P'),('gom','F'),('goc','C')]:
sub = df[df['aspect']==a][['GO_id', 'GOSlim_bin']]
tmp = {}
for g, s in sub.values:
# tmp.setdefault(g,[]).append(s2e(s))
tmp.setdefault(g,[]).append(s)
go2slim[w] = tmp
self.go2slim = go2slim
def id2slim(self, id2gos, usecache=True):
if usecache:
cache = self.fname + '-id2slim.pic'
if os.path.exists(cache):
return load(open(cache,'rb'))
id2slim = {}
for w in ['gob','gom','goc']:
i2g = id2gos[w]
g2s = self.go2slim[w]
id2slim[w] = {i: set([y for x in i2g[i] for y in g2s.get(x,[])]) for i in i2g}
dump(id2slim, open(cache,'wb'))
return id2slim
class MGIGOAssociation(object):
FIELDS = ['database',
'accession_id',
'symbol',
'not_designation',
'go_id',
'mgi_ref_accession_id',
'evidence_code',
'inferred_from',
'ontology', # P=Biological Process, F=Molecular Function,C=Cellular Component
'name',
'synonyms',
'type', # gene, transcript, protein
'taxon',
'modification_date',
'assigned_by',
'annotation_ext',
'gene_product']
def __init__(self, fname='gene_association.txt'):
self.fname = fname
fobj = open(fname)
cnt = 0
for line in fobj:
cnt += 1
if line[0]!='!':
break
self.df = PD.read_table(fname, names=self.FIELDS, index_col=['go_id','symbol'], comment='!', skiprows=cnt-1)
def go2symbol(self, goid):
return list(set(self.df.xs(goid, level='go_id').index))
def gotree2symbol(self, goid, go):
gos = go.get_descendants(goid)
tmp = set()
for g in gos:
if g in self.df.index:
tmp = tmp.union(set(self.df.xs(g, level='go_id').index))
return list(tmp)
def symbol2go(self, symbol):
return list(set(self.df.xs(symbol, level='symbol').index))
def id2gos(self):
def make_id2go(ontology):
gos = self.df[self.df['ontology']==ontology]
symbols = list(set([x[1] for x in gos.index]))
return {s:list(set(gos.xs(s, level='symbol').index)) for s in symbols}
cache = self.fname+'-id2gos.pic'
if os.path.exists(cache):
return load(open(cache,'rb'))
id2gos = {}
for w,c in zip(['gob','gom','goc'],['P','F','C']):
print('making id => %s ...' % w)
id2gos[w] = make_id2go(c)
dump(id2gos, open(cache,'wb'))
return id2gos
class GO(object):
roots = dict(gom='GO:0003674',
goc='GO:0005575',
gob='GO:0008150')
__no_ref_to_other_class__ = True
def __init__(self, obo='gene_ontology.1_0.obo'):
"""
@param obo: path to GO obo file (ver1.0)
"""
self.obo = obo
self.dir = os.path.dirname(obo)
pname = '%s.pic' % obo
if os.path.exists(pname):
print( "loading GO from pickled file...")
dic = load(open(pname,'rb'))
self.__dict__.update(dic)
elif not os.path.exists(obo):
print( "specified GO file %s does not exists, GO object not initialized" % (obo,))
return
else:
print( "parsing GO file...")
txt = open(obo,'r').read()
#recs = re.split('^\n[\w+\]\s*\n',txt) # stanza section sep
item_re = re.compile('\n(\w+):')
recs = txt.split('\n\n')
recs = [x for x in recs if '[Term]\nid:' in x]
num = len(recs)
id2idx = {}
ilist = [None]*num
rlist = [None]*num
plist = [None]*num
nlist = [None]*num
dlist = ['']*num
clist = [[] for x in range(num)] # need to instantiate independent ones
for idx, rec in enumerate(recs):
items = item_re.split(rec)
is_a = []
alt_id = []
for k in range(1,len(items),2):
key = items[k].strip()
val = items[k+1].strip()
if key == 'id':
id = val
if key == 'name':
name = val
if key == 'def':
defi = val
if key == 'is_a':
is_a.append(val.split('!')[0].strip())
if key == 'alt_id':
alt_id.append(val)
if key == 'relationship':
key, val = val.split()[:2]
if key=='part_of':
is_a.append(val)
id2idx[id] = idx
for x in alt_id:
id2idx[x] = idx
ilist[idx] = id
rlist[idx] = rec
nlist[idx] = name
plist[idx] = is_a
dlist[idx] = defi
print( " finding reverse relation...")
for idx, parents in enumerate(plist):
for p in parents:
clist[id2idx[p]].append(idx)
plist[idx] = [id2idx[x] for x in parents]
print( " saving to pickled format...")
self.id2idx = id2idx
self.ids = ilist
self.parents = plist
self.children = clist
self.names = nlist
self.terms = rlist
self.defs = dlist
self.precalc_descendants()
dump(self.__dict__, open(pname,'wb'), 2)
def __getstate__(self):
return self.obo
def __setstate__(self, val):
self.__init__(val)
def find(self, regex, fld='terms'):
match = re.compile(regex).search
if fld not in ['names','terms', 'ids']:
raise KeyError
target = getattr(self, fld)
return [id for id, val in zip(self.ids, target) if match(val)]
def get_parents(self, id):
ids = self.ids
idx = self.id2idx[id]
return [ids[x] for x in self.parents[idx]]
def get_children(self, id):
ids = self.ids
idx = self.id2idx[id]
return [ids[x] for x in self.children[idx]]
def get_ancestors(self, id):
tmp = set()
for x in self.get_parents(id):
tmp.add(x)
tmp = tmp.union(set(self.get_ancestors(x)))
return list(tmp)
def get_descendants(self, id):
tmp = {id}
for x in self.get_children(id):
tmp = tmp.union(set(self.get_descendants(x)))
return list(tmp)
def _descendants(self, idx):
tmp = {idx}
children = self.children
for x in children[idx]:
tmp = tmp.union(set(self._descendants(x)))
return list(tmp)
def precalc_descendants(self):
self.descendants = [self._descendants(x) for x in range(len(self.ids))]
def get_name(self, id):
return self.names[self.id2idx[id]]
def get_term(self, id):
return self.terms[self.id2idx[id]]
def get_def(self, id):
return self.defs[self.id2idx[id]].replace('\\','').replace('"','')
def get_roots(self):
tmp = [i for i,x,y in izip(range(len(self.parents)),self.parents,self.children) if (len(x)==0 and len(y)>0)]
return [self.ids[x] for x in tmp]
def get_singletons(self):
tmp = [i for i,x,y in izip(range(len(self.parents)),self.parents,self.children) if (len(x)==0 and len(y)==0)]
return [self.ids[x] for x in tmp]
def get_root_id(self, which):
return self.roots[which]
def get_minlevels(self):
x2l = getattr(self, 'minlevels', None)
if x2l:
return x2l
x2c = self.children
i2x = self.id2idx
x2l = [-1]*len(x2c) # minimum level
def _calc(node, level):
if x2l[node]>0: # if already visited through different parent(s)
minlevel = min(x2l[node], level) # choose smaller
else:
minlevel = level # first time
x2l[node] = minlevel
for x in x2c[node]:
_calc(x, minlevel+1)
for root in self.roots.values():
_calc(i2x[root], 0)
self.minlevels = x2l
return x2l
def get_maxlevels(self):
x2l = getattr(self, 'maxlevels', None)
if x2l:
return x2l
x2c = self.children
i2x = self.id2idx
x2l = [-1]*len(x2c) # maximum level
def _calc(node, level):
if x2l[node]>0: # if already visited through different parent(s)
maxlevel = max(x2l[node], level) # choose bigger
else:
maxlevel = level # first time
x2l[node] = maxlevel
for x in x2c[node]:
_calc(x, maxlevel+1)
for root in self.roots.values():
_calc(i2x[root], 0)
self.maxlevels = x2l
return x2l
def get_spanning_tree_min(self, idx, p2c=True):
"""
        Create a spanning tree by choosing a unique parent by its min level.
A child will be assigned to a parent corresponding to the minimum level.
If there are multiple parents with a same minimum level, then intrinsic
ordering of go.parents (which is determined by geneontology.obo term ordering)
will be used.
Returns a tree structure expressed in a dict of id => parent id.
idx is used for id rather than GOid ("GO:xxxxxxxx").
To get GO id: self.ids[idx]
To get GO name: self.names[idx]
"""
rslt = {}
x2l = self.get_minlevels()
minlevel = x2l[idx]
# choose parent
candidate = [x for x in self.parents[idx] if x2l[x]==(minlevel-1)]
if len(candidate)>0: # at least there should be one if not root or isolated node
rslt[idx] = candidate[-1] # if multiple choose the left most, this should
else:
rslt[idx] = None # None indicates root
# always the same, if using the same obo file
# now get idx=> parent map from children and concatenate them
for c in self.children[idx]:
rslt.update(self.get_spanning_tree_min(c, False))
if not p2c:
return rslt
rslt2 = {}
        for idx, pidx in rslt.items():
rslt2.setdefault(pidx,[]).append(idx)
return rslt2
def get_spanning_tree_max(self, idx, p2c=True):
rslt = {}
x2l = self.get_maxlevels()
maxlevel = x2l[idx]
# choose parent
candidate = [x for x in self.parents[idx] if x2l[x]==(maxlevel-1)]
if len(candidate)>0: # at least there should be one if not root or isolated node
rslt[idx] = candidate[-1] # if multiple choose the left most, this should
else:
rslt[idx] = None # None indicates root
# always the same, if using the same obo file
# now get idx=> parent map from children and concatenate them
for c in self.children[idx]:
rslt.update(self.get_spanning_tree_max(c, False))
if not p2c:
return rslt
rslt2 = {}
        for idx, pidx in rslt.items():
rslt2.setdefault(pidx,[]).append(idx)
return rslt2
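if __name__ == '__main__':
    # Minimal command-line sketch: build (or load the cached) ontology and
    # print a few matching terms with their parents. The default obo path is
    # an assumption; pass the real location as the first argument.
    import sys
    obo_path = sys.argv[1] if len(sys.argv) > 1 else 'gene_ontology.1_0.obo'
    go = GO(obo_path)
    for goid in go.find('synaptic vesicle', fld='names')[:5]:
        parents = [go.get_name(p) for p in go.get_parents(goid)]
        print(goid, go.get_name(goid), '<-', parents)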
| mit |
pravsripad/mne-python | examples/decoding/plot_ems_filtering.py | 9 | 4717 | """
==============================================
Compute effect-matched-spatial filtering (EMS)
==============================================
This example computes the EMS to reconstruct the time course of the
experimental effect as described in :footcite:`SchurgerEtAl2013`.
This technique is used to create spatial filters based on the difference
between two conditions. By projecting the trial onto the corresponding spatial
filters, surrogate single trials are created in which multi-sensor activity is
reduced to one time series which exposes experimental effects, if present.
We will first plot a trials x times image of the single trials and order the
trials by condition. A second plot shows the average time series for each
condition. Finally a topographic plot is created which exhibits the temporal
evolution of the spatial filters.
"""
# Author: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import EMS, compute_ems
from sklearn.model_selection import StratifiedKFold
print(__doc__)
data_path = sample.data_path()
# Preprocess the data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_ids = {'AudL': 1, 'VisL': 3}
# Read data and create epochs
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 45, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_ids, tmin=-0.2, tmax=0.5, picks=picks,
baseline=None, reject=dict(grad=4000e-13, eog=150e-6),
preload=True)
epochs.drop_bad()
epochs.pick_types(meg='grad')
# Set up the data to use it in a scikit-learn way:
X = epochs.get_data() # The MEG data
y = epochs.events[:, 2] # The conditions indices
n_epochs, n_channels, n_times = X.shape
#############################################################################
# Initialize EMS transformer
ems = EMS()
# Initialize the variables of interest
X_transform = np.zeros((n_epochs, n_times)) # Data after EMS transformation
filters = list() # Spatial filters at each time point
# In the original paper, the cross-validation is a leave-one-out. However,
# we recommend using a Stratified KFold, because leave-one-out tends
# to overfit and cannot be used to estimate the variance of the
# prediction within a given fold.
for train, test in StratifiedKFold(n_splits=5).split(X, y):
# In the original paper, the z-scoring is applied outside the CV.
# However, we recommend to apply this preprocessing inside the CV.
    # Note that such scaling should be done separately for each channel if the
# data contains multiple channel types.
X_scaled = X / np.std(X[train])
# Fit and store the spatial filters
ems.fit(X_scaled[train], y[train])
# Store filters for future plotting
filters.append(ems.filters_)
# Generate the transformed data
X_transform[test] = ems.transform(X_scaled[test])
# Average the spatial filters across folds
filters = np.mean(filters, axis=0)
# Plot individual trials
plt.figure()
plt.title('single trial surrogates')
plt.imshow(X_transform[y.argsort()], origin='lower', aspect='auto',
extent=[epochs.times[0], epochs.times[-1], 1, len(X_transform)],
cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Trials (reordered by condition)')
# Plot average response
plt.figure()
plt.title('Average EMS signal')
mappings = [(key, value) for key, value in event_ids.items()]
for key, value in mappings:
ems_ave = X_transform[y == value]
plt.plot(epochs.times, ems_ave.mean(0), label=key)
plt.xlabel('Time (ms)')
plt.ylabel('a.u.')
plt.legend(loc='best')
plt.show()
# Visualize spatial filters across time
evoked = EvokedArray(filters, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(time_unit='s', scalings=1)
#############################################################################
# Note that a similar transformation can be applied with ``compute_ems``.
# However, this function replicates Schurger et al.'s original paper, and thus
# applies the normalization outside a leave-one-out cross-validation, which we
# recommend not to do.
epochs.equalize_event_counts(event_ids)
X_transform, filters, classes = compute_ems(epochs)
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
tyarkoni/pliers | pliers/extractors/image.py | 2 | 5330 | '''
Extractors that operate primarily or exclusively on Image stimuli.
'''
from functools import partial
import numpy as np
import pandas as pd
from pliers.stimuli.image import ImageStim
from pliers.extractors.base import Extractor, ExtractorResult
from pliers.utils import attempt_to_import, verify_dependencies, listify
from pliers.support.due import due, Url, Doi
cv2 = attempt_to_import('cv2')
face_recognition = attempt_to_import('face_recognition')
class ImageExtractor(Extractor):
''' Base Image Extractor class; all subclasses can only be applied to
images. '''
_input_type = ImageStim
class BrightnessExtractor(ImageExtractor):
''' Gets the average luminosity of the pixels in the image '''
VERSION = '1.0'
def _extract(self, stim):
data = stim.data
brightness = np.amax(data, 2).mean() / 255.0
return ExtractorResult(np.array([[brightness]]), stim, self,
features=['brightness'])
class SharpnessExtractor(ImageExtractor):
''' Gets the degree of blur/sharpness of the image '''
VERSION = '1.0'
def _extract(self, stim):
verify_dependencies(['cv2'])
# Taken from
# http://stackoverflow.com/questions/7765810/is-there-a-way-to-detect-if-an-image-is-blurry?lq=1
data = stim.data
gray_image = cv2.cvtColor(data, cv2.COLOR_BGR2GRAY)
sharpness = np.max(
cv2.convertScaleAbs(cv2.Laplacian(gray_image, 3))) / 255.0
return ExtractorResult(np.array([[sharpness]]), stim, self,
features=['sharpness'])
class VibranceExtractor(ImageExtractor):
''' Gets the variance of color channels of the image '''
VERSION = '1.0'
def _extract(self, stim):
data = stim.data
vibrance = np.var(data, 2).mean()
return ExtractorResult(np.array([[vibrance]]), stim, self,
features=['vibrance'])
class SaliencyExtractor(ImageExtractor):
''' Determines the saliency of the image using Itti & Koch (1998) algorithm
implemented in pySaliencyMap '''
@due.dcite(Doi("10.1109/34.730558"),
description="Image saliency estimation",
               path='pliers.extractors.image.SaliencyExtractor',
tags=["implementation"])
def _extract(self, stim):
from pliers.external.pysaliency import pySaliencyMap
# pySaliencyMap from https://github.com/akisato-/pySaliencyMap
# Initialize variables
h, w, c = stim.data.shape
sm = pySaliencyMap.pySaliencyMap(h, w)
# Compute saliency maps and store full maps as derivatives
stim.derivatives = dict()
stim.derivatives['saliency_map'] = sm.SMGetSM(stim.data)
stim.derivatives['binarized_map'] = sm.SMGetBinarizedSM(
stim.data) # thresholding done using Otsu
# Compute summary statistics
output = {}
output['max_saliency'] = np.max(stim.derivatives['saliency_map'])
output['max_y'], output['max_x'] = [list(i)[0] for i in np.where(
stim.derivatives['saliency_map'] == output['max_saliency'])]
output['frac_high_saliency'] = np.sum(
stim.derivatives['binarized_map']/255.0)/(h * w)
return ExtractorResult(np.array([list(output.values())]), stim, self,
features=list(output.keys()))
class FaceRecognitionFeatureExtractor(ImageExtractor):
_log_attributes = ('face_recognition_kwargs',)
def __init__(self, **face_recognition_kwargs):
verify_dependencies(['face_recognition'])
self.face_recognition_kwargs = face_recognition_kwargs
func = getattr(face_recognition.api, self._feature)
self.func = partial(func, **face_recognition_kwargs)
super().__init__()
def get_feature_names(self):
return self._feature
def _extract(self, stim):
values = self.func(stim.data)
feature_names = listify(self.get_feature_names())
return ExtractorResult(values, stim, self, features=feature_names)
def _to_df(self, result):
cols = listify(self._feature)
return pd.DataFrame([[r] for r in result._data], columns=cols)
class FaceRecognitionFaceEncodingsExtractor(FaceRecognitionFeatureExtractor):
''' Uses the face_recognition package to extract a 128-dimensional encoding
for every face detected in an image. For details, see documentation for
face_recognition.api.face_encodings. '''
_feature = 'face_encodings'
class FaceRecognitionFaceLandmarksExtractor(FaceRecognitionFeatureExtractor):
''' Uses the face_recognition package to extract the locations of named
features of faces in the image. For details, see documentation for
face_recognition.api.face_landmarks.'''
_feature = 'face_landmarks'
def _to_df(self, result):
data = pd.DataFrame.from_records(result._data)
data.columns = ['{}_{}'.format(self._feature, c) for c in data.columns]
return data
class FaceRecognitionFaceLocationsExtractor(FaceRecognitionFeatureExtractor):
''' Uses the face_recognition package to extract bounding boxes for all
faces in an image. For details, see documentation for
face_recognition.api.face_locations. '''
_feature = 'face_locations'
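# A minimal usage sketch (assumes pliers' standard Transformer API and a local
# image file 'face.jpg'; the exact result layout depends on the pliers version):
#
#   from pliers.stimuli.image import ImageStim
#   stim = ImageStim('face.jpg')
#   result = FaceRecognitionFaceLocationsExtractor().transform(stim)
#   df = result.to_df()   # one 'face_locations' bounding box per detected face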
| bsd-3-clause |
SeanTater/uncc2014watsonsim | scripts/svm_graph.py | 2 | 2260 | import numpy as np
import time
import math
from sklearn.svm import SVC
wout = np.load("wek2.npy")[:10000]
# X = input features
# y = output gold-standard prediction
# q = question id (for collation)
X, y, q = np.delete(wout, [8, 46], axis=1), wout[:, 8], wout[:, 46]
# border between test and training data
border = len(y) * 2/3
# Spacing of the parameters we are trying to visualize (C and gamma)
base = 10 ** (1/10.)
exp_range = range(-60, 61)
def svc((C, gamma)):
s = SVC(C=C, gamma=gamma, probability=True)
start = time.time()
s.fit(X[:border], y[:border])
train_time = time.time() - start
pred = s.predict_proba(X[border:])[:, 0]
test_time = (time.time() - start) - train_time
# This is the literal is-it-the-right-answer binary score.
# This measure is what we try to maximize but its relation to question
# accuracy is complicated
    # compare against the test-set labels (pred only covers X[border:]) and use
    # float division so the score is not truncated under Python 2
    accu = np.sum((pred > 0.5) == y[border:]) / float(len(y) - border)
### This is the actual question prediction error, in bits
# First, find the probabilities
pred_y = pred * y[border:] # These are the probabilities for right answers
pred_y = pred_y[pred_y.nonzero()] # the same, stripped of 0's
mean_bits = np.mean(-np.log(pred_y) / np.log(2)) # measured in mean bits
### This is the literal accuracy - it gets complicated
# Sort the answers by probability, descending (only getting the indices)
confidence_order = np.argsort(pred)
# This indexing trick always takes the last assignment for each index
# This will hold the index of the best answer for each question
best_answer = np.zeros(np.max(q.astype(int))+1)
best_answer[q[confidence_order].astype(int)] = confidence_order
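    # e.g. with q = [0, 0, 1, 1] and confidence_order = [1, 0, 3, 2], question 0
    # ends up with index 0 and question 1 with index 2: the last (most confident)
    # assignment for each question id wins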
# Take the average correctness of the best answer
accu_by_q = y[border:][best_answer.astype(int)].mean()
return [C, gamma, accu, mean_bits, accu_by_q, train_time, test_time]
import code
def multi():
from multiprocessing import Pool
p = Pool(40)
ins = [(base**i, base**j) for i in exp_range for j in exp_range]
with open("svmresults-largeimage-smallset.log", "w") as o:
for row in p.imap_unordered(svc, ins):
print '\t'.join(map(str, row))
o.write('\t'.join(map(str, row)) + '\n')
code.interact(local=vars())
| gpl-2.0 |
bradysalz/MinVAD | classifier/training_helpers.py | 1 | 3449 | # -*- coding: utf-8 -*-
"""
Helper functions for classification and quantization
Created on Mon Dec 5 14:50:27 2016
@author: brady
"""
import os
import numpy as np
from sklearn.tree import tree, _tree
def quantize(data, precision):
"""
Turns floating point into fixed point data
:param data: vector to quantize, assumes np-array
    :param precision: number of fixed-point bits to use
    :returns: vector with the same length as data, quantized to precision bits
"""
data = np.array(data)
data = data*1e5
xmax = np.amax(np.abs(data))
#if xmax <= 0:
# xmax = 0.000001 # helps with stability
xq = xmax * np.minimum(
np.round(data*(2**(precision-1))/xmax) / (2**(precision-1)),
1-1/(2**(precision-1))
)
return xq/1e5
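# A minimal usage sketch for quantize() (values are illustrative only):
#
#   import numpy as np
#   x = np.array([0.12, -0.5, 0.33])
#   xq = quantize(x, precision=8)  # same shape, values snapped to an 8-bit grid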
def tree_to_code(tree, feature_names, precision):
tree_ = tree.tree_
feature_name = [
feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
for i in tree_.feature
]
valid_thresh = [
t if t > 0 else np.min(np.abs(tree_.threshold))
for t in tree_.threshold
]
quant_thresh = quantize(valid_thresh, precision)
def recurse(node, depth, quant_tree_str):
indent = " " * depth
if tree_.feature[node] != _tree.TREE_UNDEFINED:
name = feature_name[node]
threshold = quant_thresh[node]
quant_tree_str += "{}if {} <= {}:\n".format(indent, name, threshold)
quant_tree_str += recurse(tree_.children_left[node], depth + 1, '')
quant_tree_str += "{}else: # if {} > {}\n".format(indent, name, threshold)
quant_tree_str += recurse(tree_.children_right[node], depth + 1, '')
return quant_tree_str
else:
quant_tree_str += "{}return {}\n".format(indent, np.argmax(tree_.value[node]))
return quant_tree_str
quant_tree_str = "def tree_{}b(features):\n".format(precision)
quant_tree_str = recurse(0, 1, quant_tree_str)
return quant_tree_str
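# For a depth-1 tree split on feature 0, the source emitted by tree_to_code looks
# roughly like this (illustrative only; thresholds depend on the fitted tree):
#
#   def tree_8b(features):
#       if features[0] <= 0.123:
#           return 0
#       else:  # if features[0] > 0.123
#           return 1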
def gen_quant_trees_str(tree, precisions):
func_list_str = ''
for p in precisions:
names = ['features['+str(x)+']' for x in range(20)]
func_list_str += tree_to_code(tree, names, p)
func_list_str += "##################################################\n"
return func_list_str
def make_quant_trees_module(filename, tree, precisions):
trees_str = gen_quant_trees_str(tree, precisions)
with open(filename, 'w') as f:
f.write(trees_str)
def get_tree_results(tree, Xtest):
"""
Runs data through a quantized DecisionTreeClassifier
:param tree: DTC function handle
:param Xtest: data to test
:returns: predicted results
"""
results = [tree(X) for X in Xtest]
return np.array([results], ndmin=1).T
if __name__ == '__main__':
DIR = r'C:\Users\brady\GitHub\MinVAD\feature_extract'
tr_data = np.load(os.path.join(DIR, 'train_130k.npy'))
tr_class = np.load(os.path.join(DIR, 'train_130k_class.npy'))
myData = np.hstack((tr_data, tr_class))
np.random.shuffle(myData)
cutoff = int(np.floor(0.8 * len(tr_class)))
clf = tree.DecisionTreeClassifier(max_depth = 5)
clf = clf.fit(myData[:cutoff, :19], myData[:cutoff, 20])
test_str = gen_quant_trees_str(clf, np.arange(16, 15, -1))
print(test_str) | mit |
chngchinboon/intercomstats | scripts/LLdataframe.py | 1 | 43472 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 16 16:19:36 2016
########################################################################################################
#Current bugs
########################################################################################################
#Possible errors in overalresponse
#Possible errors in merging/updating local database
#missing issues plot has errors: it reports untagged conversations that are in reality
#tagged. Clusters of these errors usually mean the local db was not updated properly.
#known flaws 28/12/2016
#tags only at top level
#multiple schools(m) with multi issue(n) will result in m*n conversations in augmenteddf
########################################################################################################
#To Do list
########################################################################################################
#current version
###### Clean up code #################
#arrange structure of code properly.
#currently it is a damn mess.
######################################
#next version
##### DataBase #######################
#not working well :(
#build offline database for each intercom model. Done
#at every start, check for difference, merge if possible. Done
#check for match every iteration since find_all is based on last_update. Done
#once a threshold of numerous no change matches, abort? hard to decide the threshold. Done
#perhaps average conversations a week + pid control based on previous week's number
#build each as a separate function? Done
######################################
##### Class for conversation details #####
#Build class constructor that can accept object from intercom to turn into a dataframe
#handle both models of conversation_message and conversation_parts
##########################################
@author: Boon
"""
#%%
import pandas as pd
import datetime
import os.path
import sys
from intercom.client import Client
import time
#config data
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir)))
from configs import pat#, smalllogo, largelogo
#Custom functions
import plotfunc as pltf
import augfunc as af
outputfolder=os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, 'output'))
timenow=datetime.datetime.now()
timenowepoch=(timenow- datetime.datetime(1970,1,1)).total_seconds()
#%%can check last updated at vs old dataframe to check for changes.
#use that to pull the conversation and append to convdf instead of rebuilding df.
intercom = Client(personal_access_token=pat)
#List of file paths of local copy of data files
convstatsf=os.path.abspath(os.path.join(outputfolder,'convstats.csv'))
topconvstatsf=os.path.abspath(os.path.join(outputfolder,'topconvstats.csv'))
userf=os.path.abspath(os.path.join(outputfolder,'user.csv'))
filelist=[convstatsf,topconvstatsf,userf]
#Output (for debugging)
output=True
toplot=False
#Initialize empty list for status of local data files
rebuild=[[],[],[]]
#attributes types in local data files
#datestime values from intercom is in UTC +0.
datetimeattrlist=['created_at','first_response','first_closed','last_closed','updated_at','created_at_EOD']
datetimeattrspltlist=['created_at_Date','created_at_Time']
timedeltaattrlist=['s_to_first_response','s_to_first_closed','s_to_last_closed','s_to_last_update']
#<unresolved potential bug> load the files if rebuild is off #coerce may cause potential bugs !!!!!!!!!!!!!!
#function for loading local data files
def loaddffiles(filelocation,loadmode=1):
"""load csv files loadmode:
0 = just load,
1 (default) = load and update check ,
2 = don't load & full rebuild
"""
#pre categorization of attributes
datetimeattrlist=['created_at','first_response','first_closed','last_closed','updated_at']#attributes that are basic datetime types
datetimeattrspltlist=['created_at_Date','created_at_Time']#attributes that are split, differentiated because may require different method of manipulation
    timedeltaattrlist=['s_to_first_response','s_to_first_closed','s_to_last_closed','s_to_last_update']#attributes that are timedelta types
#check if file exists. if doesn't exist need to force rebuild anyway.
if os.path.isfile(filelocation):#update if exists
print ('Found file at ' +filelocation)
else: #rebuild if doesn't exist
print ('Unable to find file at ' +filelocation)
loadmode=2
if loadmode==0 or loadmode==1:
outputdf=pd.read_csv(filelocation, sep='\t', encoding='utf-8',index_col=False)
        if hasattr(outputdf, u'Unnamed: 0'): del outputdf['Unnamed: 0']#might be hiding poorly merged attempts
        if hasattr(outputdf, u'Unnamed: 0.1'): del outputdf['Unnamed: 0.1']#might be hiding poorly merged attempts
if hasattr(outputdf, 'convid'): outputdf['convid']=outputdf['convid'].astype('unicode')#loading auto changes this to int
if hasattr(outputdf, 'assignee'): outputdf['assignee']=outputdf['assignee'].astype('unicode')#loading auto changes this to int
for item in datetimeattrlist+datetimeattrspltlist:#process datetime attributes into datetime
if hasattr(outputdf, item): outputdf[item] = pd.to_datetime(outputdf[item],errors='coerce')
for item in timedeltaattrlist:#process timedelta attributes into timedelta
if hasattr(outputdf, item): outputdf[item] = pd.to_timedelta(outputdf[item],errors='coerce')
print ('Loaded file from ' + filelocation)
if loadmode==1:
rebuild=True
else:
rebuild=False
else:
print ('Forcing rebuild...')
rebuild=True
outputdf=None
return outputdf, rebuild
#load userdf
userdf, rebuild[2]=loaddffiles(userf,1)
if userdf is not None:
#force into types as if we loaded manually
#del userdf['Unnamed: 0']
userdf['anonymous']=userdf['anonymous'].astype(bool)
userdf['unsubscribed_from_emails']=userdf['unsubscribed_from_emails'].astype(bool)
userdf['session_count']=userdf['session_count'].astype('int64')
#might want to combine this with top
userdf['created_at']=pd.to_datetime(userdf['created_at'],errors='coerce')
userdf['last_request_at']=pd.to_datetime(userdf['last_request_at'],errors='coerce')
userdf['remote_created_at']=pd.to_datetime(userdf['remote_created_at'],errors='coerce')
userdf['signed_up_at']=pd.to_datetime(userdf['signed_up_at'],errors='coerce')
userdf['updated_at']=pd.to_datetime(userdf['updated_at'],errors='coerce')
#updated_at can be used to check if needs updating
print ('Loaded #'+str(len(userdf))+ ' users')
#load convdf
convdf, rebuild[0]=loaddffiles(convstatsf,1)
#['tags'] need to be split into a list
#assigned_to read in as float may throw an error when printing?
#msgid read in as int64
#notified_at read in as object
#part_type read in as object (unicode)
#subject read in as float64 because all NaN
#url read in as float64 because all NaN
#load topconvdf
topconvdf, rebuild[1]=loaddffiles(topconvstatsf,1)
if topconvdf is not None:
topconvdf['created_at_EOD']=pd.to_datetime(topconvdf['created_at_EOD'],errors='coerce')
print ('Loaded #'+str(len(topconvdf))+ ' conversations')
#school and tags read in as unicode
#%% Get admin info #small enough that can quickly get
#from intercom import Admin
admindf=pd.DataFrame([x.__dict__ for x in intercom.admins.all()])
print('Retrieved Admin Df from Intercom')
#split admin by country
sglist = []
with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,'sgadminlist.txt'))) as inputfile:
for line in inputfile:#load admin list from file. #future improvement required!
sglist.append(line.strip())
admindf_SG=admindf[admindf.name.isin(sglist)]
admindf_MY=admindf[~admindf.name.isin(sglist)]
countrylist=['Sg','My']
admindfbycountry=[admindf_SG,admindf_MY]
#%%Count stats from intercom
AdminCount=pd.DataFrame(intercom.counts.for_type(type='conversation', count='admin').conversation['admin'])
print('Retrieved AdminCount Df from Intercom')
#%% Get tags
#from intercom import Tag
tagdf=pd.DataFrame([x.__dict__ for x in intercom.tags.all()])
#load issue from file
issuename = []
with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,'issuelist.txt'))) as inputfile:
for line in inputfile:
issuename.append(line.strip())
#group tags by issue
issuetag=tagdf[tagdf.name.isin(issuename)]
#group tags by school
schooltag=tagdf[~tagdf.name.isin(issuename)]
print('Retrieved Issuetag and Schooltag Df from Intercom')
#%% Get Users ##########too large. need scrolling api
#loading from csv may not give recent info. need to pull from intercom for latest
#from intercom import User
userdatetimeattrlist=['created_at','last_request_at','remote_created_at','signed_up_at','updated_at']
def getfewusers(df, obj, num):#consider using updated_at to check if user needs to be updated!
userdatetimeattrlist=['created_at','last_request_at','remote_created_at','signed_up_at','updated_at']
tempuserdict=[]
eof=False
for idx in xrange(num): #get num of users
try:
#convert the python-intercom generator to a list of dicts to build the dataframe
tempuserdict.append(obj[0].__dict__.copy())
except Exception, err:#python-intercom generator throws an error upon end. Use that to identify end
print (err)
eof=True
break
#Build temp dataframe to check against existing dataframe(from local data)
tempuserdf=pd.DataFrame(tempuserdict)
#Find missing users
if df is None:#For handling empty/missing dataframe. Occurs when rebuilding from scratch
missinguserdf=tempuserdf.copy()
else:#slice only those missing
missinguserdf=tempuserdf[~tempuserdf.id.isin(df.id)].copy()
nummissing=len(missinguserdf)#number of missing
for attr in userdatetimeattrlist:#Force convert attributes to datetime
missinguserdf[attr]=pd.to_datetime(missinguserdf[attr],unit='s')
return missinguserdf, nummissing,eof
if rebuild[2]:
print('Retrieving recent users from Intercom. This may take awhile......')
getmore=True#temp flag to know if should retrieve more data from intercom
userobj=intercom.users.all()#python-intercom generator for user list
itercounter=1#counter variable for reporting status of retrieval
if userdf is None:#different rates for retrieval
print('Userdf missing. Rebuilding from scratch')
retrievenumi=100#higher rate so that its less spammy when reporting
else:
print('Userdf exists. Retrieving missing users from scratch')
retrievenumi=25
while getmore== True:
toget=retrievenumi*2**itercounter#exponentially increasing subset to retrieve
missinguserdf,nummissing,eof=getfewusers(userdf,userobj,toget)#retrieve a subset of users
print('Found '+str(nummissing)+'/'+str(toget)+' missing users.')
userdf=pd.concat([userdf, missinguserdf], ignore_index=True)#append missing users
print('Updated userdf')
itercounter+=1
if nummissing>10:#soft margin because will retrieve if found missing when retrieving conversations
getmore=True
print('Retrieving more to check')
else:
getmore=False
print('Missing users less than 10. Exiting while loop')
if eof:
getmore=False
print ('Need to wait for scrolling api to be added by python API dev.')
#might want to drop duplicates
#userdf.drop_duplicates('id',inplace=True)
#userdf.reset_index(inplace=True)
print('Completed retrieval of user')
#%% Get all conversations
#load issue from file
#custom texts to remove from conversation body.
texttoremove = []
with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,'textlist.txt'))) as inputfile:
for line in inputfile:
texttoremove.append(line.strip())
#function for getting subset of conversations
def getfewconv(df, convobj, num):
if df is not None:#<unresolved> Poor implementation. need to find better way.
latestupdate=df.updated_at.max()-pd.Timedelta('1 days') #1 day before the max of the df <--- depending on how often the script is run!!!!!!!!
else:
latestupdate=pd.to_datetime(0)#set to 0 so that will retrieve all.
tempdictlist=[]
eof=False
for idx in xrange(num): #get num of convs
try:
tempdictlist.append(convobj[0].__dict__.copy())
except Exception, err:
print (err)
eof=True
break
#Build dataframe for merging
tempconvdf=pd.DataFrame(tempdictlist)
#collect only those later than latestupdate
tempconvdf=tempconvdf[pd.to_datetime(tempconvdf.updated_at,unit='s') > latestupdate]
numtoupdate=len(tempconvdf)
if numtoupdate==0:
eof=True
return tempconvdf, numtoupdate, eof
if rebuild[1]:
if convdf is None:
print ('Convdf is empty. Rebuilding from scratch')
tomergedf=[]
convobj=intercom.conversations.find_all()#Python-intercom generator
getmore=True
retrievenumi=100
itercounter=1
updatenumaccu=0#Accumulator for overall number of conversations to be updated
while getmore== True:
toget=retrievenumi*2**itercounter#exponentially increase number of conversations to get
tomerge,numtoupdated,eof=getfewconv(topconvdf,convobj,toget)
print('Found total '+str(numtoupdated)+'/'+str(toget)+' conversations in this set that needs updating.')
if tomerge is not None:
if itercounter==1:#handle first loop
tomergedf=tomerge.copy()
else:
tomergedf=tomergedf.append(tomerge)
if numtoupdated!=0:
getmore=True
print('Retrieving more to check')
else:
getmore=False
print('Found no conversations to update. Exiting while loop')
if eof:#may be redundant (useful only if instead of a hard stop, using a threshold)
getmore=False
print ('Reached eof')
itercounter+=1
updatenumaccu+=numtoupdated#update accumulator
print('Completed retrieval.')
print('Total of #'+str(updatenumaccu)+' conversations that needs to be updated.')
totalconv=len(tomergedf)#total conversations to be merged
#format columns into what is required
tomergedf.assignee=tomergedf.assignee.apply(lambda s: s.id)
tomergedf['adminname']=tomergedf.assignee.apply(lambda s: af.getadminname(s,admindf))
tomergedf.user=tomergedf.user.apply(lambda s: s.id)
tomergedf=tomergedf.rename(columns={ 'id' : 'convid'})
del tomergedf['changed_attributes']
for attr in userdatetimeattrlist:#may be redundant
try:
tomergedf[attr]=pd.to_datetime(tomergedf[attr],unit='s')
except KeyError:
pass
#Since userdf depends on scroll api, may be missing users from intercom
#scan through list of conversations to augment tomergedf with username and email.
itercounter=1
missinguserdf=0
df=[]
for index, row in tomergedf.iterrows():
try:
if itercounter%(int(totalconv/10))==0:#display progress counter
print('Processed ' + str(itercounter)+'/'+str(totalconv) + ' conversations')
except ZeroDivisionError:
pass
userid=row.user
if row.user!=None:
try:
idxdf=userdf['id']==userid#count number of occurance
except TypeError:#incase idxdf is empty
idxdf=[0]
if sum(idxdf)>1:#duplicate user entry. need to purge
print('Duplicate user entry found. Please check csv/intercom')
if sum(idxdf)==0:#ask intercom
#print('Missing user '+str(userid)+' from dataframe. Retrieving from Intercom instead')
userdetails=intercom.users.find(id=userid).__dict__.copy() #convert to dict for storage into df
#convert to df for merging
userdetails=pd.DataFrame([userdetails])#need to place in list mode. possible source of error
#convert datetime attributes to datetime objects
for attr in userdatetimeattrlist:
userdetails[attr]=pd.to_datetime(userdetails[attr],unit='s')
#append to userdf
userdf=userdf.append(userdetails,ignore_index=True)
missinguserdf+=1
#userdetails=userdetails[['name','email']].iloc[0].tolist()
else:#to handle multiple userid in userdf!!!!!! shouldn't be the case!!
userdetails=userdf[userdf['id']==userid]#.iloc[0]#.to_dict()#found in df, extract values out
#userdetails=userdetails[['name','email']].tolist()
#userdetails=userdetails[userdetails.keys()[0]]#format may be different. possible source of errors!!!!!!!!!!!!!!!! keys used because method returns a series
df.append(userdetails[['name','email']].iloc[0].tolist())
else:#handle empty user
print('Found empty user!!!')#need to troubleshoot #possibly deleted user/admin
df.append(['None','None'])
itercounter+=1
#df=pd.Series([dict(username=userdetails.get('name'),email=userdetails.get('email'),role=userdetails.get('role'))])
tomergedf=tomergedf.reset_index().merge(pd.DataFrame(df,columns=['username','email']),left_index=True, right_index=True)#probably wrong here df going crazy. update:30/1/17, merging properly now
if 'Unnamed:' in ''.join(tomergedf.columns.tolist()):
print('!!!!!!!!!!WARNING POOR MERGE DETECTED!!!!!!!!!!!!!!!!!!!!!!!!!')
print('Extracted all conversations to be merged')
print('Found #' + str(itercounter-1) + ' conversations with missing user info')
print('Found #'+ str(missinguserdf) + ' users missing from userdf')
print('Updated userdf')
else:
print ('Load All conversations from csv')
totalconv=len(topconvdf.index)
print('Total Conversations: ' + str(totalconv))
print('Time started: '+ str(datetime.datetime.now()))
#%% create another dataframe with all conversation parts for reference
#tt.tic()
attrnames=['author','created_at','body','id','notified_at','part_type','assigned_to','url','attachments','subject']
conv=[]
if rebuild[0]:
print('Retrieving full content of conversations from Intercom')
itercounter=1
#current implementation brute forces appending dicts and modify outside of loop.
#Not scalable :(
#bottleneck should still be querying from intercom
for convid in tomergedf.convid:
try:
if itercounter%(int(totalconv/10))==0:#display progress counter
print('Processing ' + str(itercounter)+'/'+str(totalconv) + ' conversations')
except ZeroDivisionError:
pass
#get valuves
convobj=intercom.conversations.find(id=convid) #return conversation object
        #the object already has datetime attributes in the proper format; converting it to a dict turns them into seconds, which is fine since the whole column is converted back to datetime once in dataframe form
#message
conv_message=convobj.conversation_message.__dict__.copy()
conv_message['convid']=convid
conv_message['idx_conv']=0
#Missing
conv_message['notified_at']=None
conv_message['part_type']='initial'
conv_message['assigned_to']=None
#Modify attributes
conv_message['created_at']=convobj.created_at.replace(tzinfo=None)
conv_message['msgid']=conv_message['id']
del conv_message['id']
conv_message['author']=conv_message['author'].id
conv_message['tags']=convobj.tags
if conv_message['tags']:
#convert from list of tag objects into a list of tag names
conv_message['tags']=[temptagid.name for temptagid in conv_message['tags']]
#temptaglist=[]
#for numtag in conv_message['tags']:
# temptagid=numtag.id
# temptaglist.append(tagdf['name'][tagdf['id']==temptagid].item())
##conv_message['tags']=','.join(temptaglist) #incase need to convert to strlist
#conv_message['tags']=temptaglist
conv_message['body']=af.parsingconvtext(conv_message['body'],texttoremove)
#useless attributes
del conv_message['changed_attributes']
del conv_message['attachments']
#del conv_message['body'] #<-- tracking?
del conv_message['subject']
del conv_message['url']
#append to final list
conv.append(conv_message)
#part
for i,item in enumerate(convobj.conversation_parts):
conv_part=item.__dict__.copy()
conv_part['convid']=convid
conv_part['idx_conv']=i+1
#missing attributes
conv_part['subject']=None
conv_part['url']=None
#Modify attributes
conv_part['msgid']=conv_part['id']
del conv_part['id']
conv_part['created_at']=pd.to_datetime(conv_part['created_at'],unit='s')
conv_part['author']=conv_part['author'].id
if conv_part['assigned_to']:
conv_part['assigned_to']=conv_part['assigned_to'].id
try:
if conv_part['tags']:
temptaglist=[]
for numtag in conv_part['tags']:
temptagid=numtag.id
temptaglist.append(tagdf['name'][tagdf['id']==temptagid].item())
#conv_message['tags']=','.join(temptaglist)
conv_part['tags']=temptaglist
except KeyError:
conv_part['tags']=None
conv_part['body']=af.parsingconvtext(conv_part['body'],texttoremove)
#useless attributes
del conv_part['updated_at']
del conv_part['external_id']
del conv_part['changed_attributes']
#del conv_part['body']
del conv_part['attachments']
#append to final list
conv.append(conv_part)
#Just in case the constant requests trigger api limits.
ratelimit=intercom.rate_limit_details
try:
if ratelimit['remaining']<25:
print('Current rate: %d. Sleeping for 1 min' %ratelimit['remaining'])
time.sleep(60)
print('Resuming.....')
except KeyError:
continue
itercounter+=1
convdftomerge=pd.DataFrame(conv)
print('Built convdftomerge')
#convert possible datetime strings to datetime objects
if not convdftomerge.empty: #may not have anything to merge
convdftomerge['notified_at']=pd.to_datetime(convdftomerge['notified_at'],unit='s')
#merge here
if convdf is not None:
#update values in common rows
common=convdftomerge[convdftomerge.convid.isin(convdf.convid) & convdftomerge.idx_conv.isin(convdf.idx_conv)]
convdf.update(common)
#append missing rows
missing=convdftomerge[~(convdftomerge.convid.isin(convdf.convid) & convdftomerge.idx_conv.isin(convdf.idx_conv))]
convdf=convdf.append(missing)
print('Updated convdf with convdftomerge')
else:
print('Convdf empty, using convdftomerge instead')
convdf=convdftomerge
#convdf['created_at']=pd.to_datetime(convdf['created_at'],unit='s')
#split datetime into two parts so that can do comparison for time binning
af.splitdatetime(convdftomerge,datetimeattrlist)#<--- consider bringing down during augment
if 'Unnamed:' in ''.join(convdftomerge.columns.tolist()):
print('!!!!!!!!!!WARNING POOR MERGE DETECTED!!!!!!!!!!!!!!!!!!!!!!!!!')
else:
print ('Loaded Conversations from csv')
#tt.toc()
print('Time started: '+ str(datetime.datetime.now()))
#%% Augment data
if rebuild[1]:
if not tomergedf.empty:
print('Building additional info for each conversation')
toaugment=tomergedf.copy()#keep original so that don't mess
#getting conversation part stats
print('Getting Conversation part stats')
convpartstatsdf=toaugment.convid.apply(lambda s: af.getconvpartnum(s,convdf))
print('Conversation part stats df generated')
#get tags
print('Getting conversation school(s) and issue(s)')
issuenschooldf=toaugment.convid.apply(lambda s: pd.Series({'numtags': len(af.gettotaltags(s,convdf)),
'issue': af.getissue(s,convdf,issuetag),#duplicate
'school': af.getschool(s,convdf,schooltag)#duplicate
}))
print('School and issue df generated')
#get time info
print('Generating key time stats')
generateddf=toaugment.convid.apply(lambda s: af.getkeytimestats(s,convdf))
print('first_response, first_closed and last_closed df generated')
#some missing values need to change to be able to manipulate
for item in datetimeattrlist+datetimeattrspltlist:
#if hasattr(generateddf, item): generateddf[item] = pd.to_datetime(generateddf[item],unit='s',errors='coerce') #weird bug here. cannot coerce, will force everything to nat
if hasattr(generateddf, item): generateddf[item] = pd.to_datetime(generateddf[item])
#get response,firstclose,lastclose timedelta
print('Getting timedeltas')
tdeltadf=generateddf[['first_response', 'first_closed','last_closed']].sub(toaugment['created_at'], axis=0)
tdeltadf.columns = ['s_to_first_response', 's_to_first_closed','s_to_last_closed']
tudeltadf=toaugment[['updated_at']].sub(toaugment['created_at'], axis=0)
tudeltadf.columns = ['s_to_last_update']
print('Timedelta for first_response, first_closed, last_closed, updated_at, generated')
#concat them together
toaugment=pd.concat([toaugment,convpartstatsdf,issuenschooldf,generateddf,tdeltadf,tudeltadf], axis=1)
print('Additional info for each conversation')
#change open from bool to int for easier understanding
toaugment['open']=toaugment.open.apply(lambda s: s*1)
#count issues
toaugment['numissues']=toaugment.issue.apply(lambda s: af.countissue(s))
#bintime for pivot tables
#responsebinlist=[0,1,2,3,4,365*24]
responsebinlist=[0,1,2,3,365*24]
resolvebinlist=[0,1,2,3,4,12,24,365*24]
#toaugment['s_response_bin']=toaugment.s_to_first_response.apply(lambda s: af.bintime(s,'h',responsebinlist,0))
#responsecolumnlabels=['0-1','1-2','2-3','3-4','>4','UN']
responsecolumnlabels=['0-1','1-2','2-3','>3','UN']
resolvecolumnlabels=['0-1','1-2', '2-3','3-4','4-12','12-24','>24','UN']
#bin response
tempbin=pd.cut(toaugment.s_to_first_response.dt.total_seconds(),[i*3600 for i in responsebinlist],labels=responsecolumnlabels[:-1])
#replace NaN with UN
toaugment['s_response_bin']=tempbin.cat.add_categories(responsecolumnlabels[-1]).fillna(responsecolumnlabels[-1])
#bin resolve
tempbin=pd.cut(toaugment.s_to_last_closed.dt.total_seconds(),[i*3600 for i in resolvebinlist],labels=resolvecolumnlabels[:-1])
#replace NaN with UN
toaugment['s_resolve_bin']=tempbin.cat.add_categories(resolvecolumnlabels[-1]).fillna(resolvecolumnlabels[-1])
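        #e.g. a first response after 2.5 h lands in the '2-3' bin, while a missing
        #(NaT) response falls outside pd.cut and is relabelled 'UN' via add_categories/fillna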
#split datetime for created_at into two parts so that can do comparison for time binning
af.splitdatetime(toaugment,datetimeattrlist[0])
#add end of created day
toaugment['created_at_EOD']=toaugment.created_at_Date.apply(lambda s: s+pd.Timedelta('1 days')+pd.Timedelta('-1us'))
#add first message text
toaugment['firstmessage']=toaugment.convid.apply(lambda s: af.getfirstmessage(s,convdf))
#merge the missing files!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! columns of missing and temptopconvdf different!!!!!!!!!need to check!!!! appending is screwing things up!
#missing is missing username(converted name from id)
#missing role
#extra numopened <-- last minute addition. csv file may not have
#missing email
#extra conversation message
#extra changed_attributes
if topconvdf is not None:
#update values in common rows
##indexes of tomergedf is based on how intercom arranges the info from conversation_findall.
#has no meaning and relation to the topconvdf index.
#Have to use convid as the index instead.
common=toaugment[toaugment.convid.isin(topconvdf.convid)]
temptopconvdf=topconvdf.set_index('convid').copy()
            temptopconvdf.update(common.set_index('convid')) #set_index must not be inplace here, or update() would receive None
#temptopconvdf.reset_index(drop=True,inplace=True)
temptopconvdf.reset_index(inplace=True)
#append missing rows
missing=toaugment[~toaugment.convid.isin(topconvdf.convid)]
topconvdfcopy=temptopconvdf.append(missing)
print('Updated topconvdfcopy with toaugment')
else:
print('topconvdf empty, using toaugment instead')
topconvdfcopy=toaugment
topconvdfcopy.reset_index(drop=True,inplace=True)
if 'Unnamed:' in ''.join(topconvdfcopy.columns.tolist()):
print('!!!!!!!!!!WARNING POOR MERGE DETECTED!!!!!!!!!!!!!!!!!!!!!!!!!')
else:
topconvdfcopy=topconvdf.copy()
print('tomergedf empty. Skipping augmentation')
#rename so that it doesn't conflict when pulling conversation parts
#topconvdf=topconvdf.rename(columns={ 'id' : 'convid'})
#convert columns with datetime strings to datetime objects
##redundant!!! removed for now
#topconvdfcopy['updated_at']=pd.to_datetime(topconvdfcopy['updated_at'],unit='s')
#topconvdfcopy['created_at']=pd.to_datetime(topconvdfcopy['created_at'],unit='s')
#convert timedelta obj to timedelta
#for item in timedeltaattrlist:
# if hasattr(topconvdfcopy, item): topconvdfcopy[item] = pd.to_timedelta(topconvdfcopy[item],errors='coerce')
else:
topconvdfcopy=topconvdf.copy()
if not hasattr(topconvdfcopy,'created_at'):
af.splitdatetime(topconvdfcopy,datetimeattrlist[0])
#lists are read in as string. need to convert back so that can process. should move to common procedure when first loading in!!!!!!!!!!!!!!!!!!!!!!!!!!!
str2listdf=topconvdfcopy.convid.apply(lambda s: pd.Series({'issue': af.getissue(s),'school': af.getschool(s)})) #duplicate
#cheating abit here. instead of processing string within adminconvdfcopy, getting entire data from convdf
del topconvdfcopy['issue']
del topconvdfcopy['school']
topconvdfcopy=topconvdfcopy.merge(str2listdf, left_index=True, right_index=True)
print('Metrics loaded from csv')
#%% Plotting
#%%group by tf
print('Generating plots')
print('Time started: '+ str(datetime.datetime.now()))
#timeframe=[7,30,180,365]
#list of timeframes in days
timeframeend=[0,8,0,31,0,0]#[w1,w2,m1,m2,0.5y,1y]
timeframestart=[7,15,30,61,180,365]
#time frames start and ends in datetime objects
timeframestartdt=[timenow.date()-datetime.timedelta(dt) for dt in timeframestart]
timeframeenddt=[timenow.date()-datetime.timedelta(dt) for dt in timeframeend]
#for debugging
#timeinterval=[timeframestartdt[0],timeframeenddt[0]]
#ofilename='test'
#change none to string so that can group
topconvdfcopy['issue']=topconvdfcopy.issue.apply(lambda s: af.changenonetostr(s))
topconvdfcopy['school']=topconvdfcopy.school.apply(lambda s: af.changenonetostr(s))
topconvdfcopy.adminname=topconvdfcopy.adminname.apply(lambda s: af.changenonetostr(s,'Unassigned'))
topconvdfcopy.adminname.fillna('Unassigned',inplace=True)
#make copy for plotting (or else conversion to local time will offset data to be saved)
topconvdfcopyutc=topconvdfcopy.copy()
#need to convert utc time to local
for item in datetimeattrlist:
if hasattr(topconvdfcopyutc, item): topconvdfcopyutc[item] = topconvdfcopyutc[item]+pd.Timedelta('8 hours')
#resplit to update
af.splitdatetime(topconvdfcopyutc,['created_at'])
topconvdfcopyutc['created_at_EOD']=topconvdfcopyutc.created_at_Date.apply(lambda s: s+pd.Timedelta('1 days')+pd.Timedelta('-1us'))
issueschoolexpandeddf=pltf.expandtag(pltf.expandtag(topconvdfcopyutc,'issue'),'school')
Alloutdisable=False
pltsilent=[True,True]
#save folders
foldername=timenow.strftime("%Y%m%d-%H%M%S")
pathbackup=os.path.abspath(os.path.join(outputfolder,foldername))
try:
os.makedirs(pathbackup)
except OSError:
if not os.path.isdir(pathbackup):
raise
for idx,country in enumerate(countrylist):
#split by country
if idx==0:
tempexpanded=issueschoolexpandeddf[(issueschoolexpandeddf.adminname.isin(admindfbycountry[idx].name))|(issueschoolexpandeddf.adminname.isnull())]
temptopconvdfcopy=topconvdfcopyutc[(topconvdfcopyutc.adminname.isin(admindfbycountry[idx].name))|(topconvdfcopyutc.adminname.isnull())]
else:
tempexpanded=issueschoolexpandeddf[(issueschoolexpandeddf.adminname.isin(admindfbycountry[idx].name))]
temptopconvdfcopy=topconvdfcopyutc[(topconvdfcopyutc.adminname.isin(admindfbycountry[idx].name))]
subfolderpath=os.path.abspath(os.path.join(outputfolder,foldername,country))
try:
os.makedirs(subfolderpath)
except OSError:
if not os.path.isdir(subfolderpath):
raise
outputstats=True
if outputstats & ~Alloutdisable:
pltf.agpgen(tempexpanded, [timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'Weeklyemail.xlsx')),responsecolumnlabels,resolvecolumnlabels)
plotallconvobyadmin=True
if plotallconvobyadmin & ~Alloutdisable:
pltf.allconvobyadminplot(tempexpanded,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'tagsbyAdmin_1W_'+country)),silent=pltsilent[idx])
plotoveralltags=True
if plotoveralltags & ~Alloutdisable:
pltf.overalltagplot(tempexpanded,[timeframestartdt[2],timeframeenddt[2]],os.path.abspath(os.path.join(subfolderpath,'Overalltagsformonth_'+country)),silent=pltsilent[idx])
pltf.overalltagplot(tempexpanded,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'Overalltagsforweek_'+country)),silent=pltsilent[idx])
pltf.overalltagplot2(tempexpanded,[[timeframestartdt[3],timeframeenddt[3]],[timeframestartdt[2],timeframeenddt[2]]],os.path.abspath(os.path.join(subfolderpath,'Overalltagsforpast2month_'+country)),silent=pltsilent[idx])
pltf.overalltagplot2(tempexpanded,[[timeframestartdt[1],timeframeenddt[1]],[timeframestartdt[0],timeframeenddt[0]]],os.path.abspath(os.path.join(subfolderpath,'Overalltagsforpast2week_'+country)),silent=pltsilent[idx])
plotopenconvobytf=True
if plotopenconvobytf & ~Alloutdisable:
pltf.openconvobytfplot(temptopconvdfcopy,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'openbyday_1W_'+country)),silent=pltsilent[idx])
pltf.openconvobytfplot(temptopconvdfcopy,[timeframestartdt[2],timeframeenddt[2]],os.path.abspath(os.path.join(subfolderpath,'openbyday_1M_'+country)),silent=pltsilent[idx])
pltf.curropenconvplot(tempexpanded,os.path.abspath(os.path.join(subfolderpath,'openbyadmin_'+country)),silent=pltsilent[idx])
pltf.curropenconvplotbyissue(tempexpanded,os.path.abspath(os.path.join(subfolderpath,'openbyissue_'+country)),silent=pltsilent[idx])
plottagsbyday=True
if plottagsbyday & ~Alloutdisable:
pltf.tagsbytfplot(tempexpanded,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'tagsbyday_1W_'+country)),silent=pltsilent[idx])
plotoverallresponsestats=True
if plotoverallresponsestats & ~Alloutdisable:
pltf.overallresponsestatplot(temptopconvdfcopy,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'overallresponse_1W_'+country)),silent=pltsilent[idx])
pltf.overallresponsestatplot(temptopconvdfcopy,[timeframestartdt[2],timeframeenddt[2]],os.path.abspath(os.path.join(subfolderpath,'overallresponse_1M_'+country)),silent=pltsilent[idx])
plottagsbyschool=True
if plottagsbyschool & ~Alloutdisable:
try:
pltf.tagsbyschoolplot(tempexpanded,[timeframestartdt[0],timeframeenddt[0]],os.path.abspath(os.path.join(subfolderpath,'tagsbyschool_1W_'+country)),silent=pltsilent[idx])
pltf.tagsbyschoolplot(tempexpanded,[timeframestartdt[2],timeframeenddt[2]],os.path.abspath(os.path.join(subfolderpath,'tagsbyschool_1M_'+country)),silent=pltsilent[idx])
pltf.tagsbyschoolplot(tempexpanded,[timeframestartdt[5],timeframeenddt[5]],os.path.abspath(os.path.join(subfolderpath,'tagsbyschool_1Y_'+country)),silent=pltsilent[idx])
except Exception, err:
print(err)
pass
plotnonetags=True
if plotnonetags & ~Alloutdisable:
pltf.nonetagplot(temptopconvdfcopy,[timeframestartdt[0],timeframeenddt[0]],'issue',os.path.abspath(os.path.join(subfolderpath,'missingissue_1W_'+country)),silent=pltsilent[idx])
pltf.nonetagplot(temptopconvdfcopy,[timeframestartdt[0],timeframeenddt[0]],'school',os.path.abspath(os.path.join(subfolderpath,'missingschool_1W_'+country)),silent=pltsilent[idx])
#%% output to csv. Consider shifting earlier
if output:
convdfcopy=convdf.copy()
#if rebuild[0]:
# del convdfcopy['body'] ## special characters are screwing with the output writing
convdfcopy.to_csv(convstatsf, sep='\t', encoding="utf-8")
convdfcopy.to_csv(os.path.abspath(os.path.join(outputfolder,foldername,'convstats.csv')), sep='\t', encoding="utf-8")
print('Written to '+convstatsf)
topconvdfcopyoutput=topconvdfcopy.copy()
if rebuild[1]:
del topconvdfcopy['conversation_message']#objects saving has no meaning after import
'''
#rearranging columns before output
convcolumns=['adminname','convid','open','read','created_at','created_at_Date',
'created_at_Time','first_response','s_to_first_response','numclosed',
'first_closed','s_to_first_closed','last_closed','s_to_last_closed',
'updated_at','s_to_last_update','issue','numissues','school',
'numtags','nummessage','numassign','numclosed','numnote','user',
'username','email','role','assignee','s_response_bin',
's_resolve_bin']
#topconvdfcopyoutput=topconvdfcopy[convcolumns]
'''
topconvdfcopyoutput.to_csv(topconvstatsf, sep='\t', encoding="utf-8")
topconvdfcopyoutput.to_csv(os.path.abspath(os.path.join(outputfolder,foldername,'topconvstats.csv')), sep='\t', encoding="utf-8")
print('Written to '+ topconvstatsf)
if rebuild[2]:
#need to drop duplicates. ##########potential error source
if hasattr(userdf, 'Unnamed: 0'): del userdf['Unnamed: 0']#<unresolved>hides merging errors
userdf.drop_duplicates('id').to_csv(userf, sep='\t', encoding="utf-8")
userdf.to_csv(userf, sep='\t', encoding="utf-8")
userdf.to_csv(os.path.abspath(os.path.join(outputfolder,foldername,'user.csv')), sep='\t', encoding="utf-8")
print('Written to '+ userf)
'''
groupedbyadminstats.to_csv(groupbyadmintatsf,sep='\t', encoding="utf-8")
groupedbyadmindatesummary.to_csv('summary.csv',sep='\t', encoding="utf-8")
'''
pltf.zip_dir(os.path.abspath(os.path.join(outputfolder,foldername)))
#intercom.conversations.find_all(after='1505063464')
#timeframe=int((timenow-datetime.timedelta(7)-datetime.datetime(1970,1,1)).total_seconds()) | mit |
timzhangau/ml_nano | ud120/final_project/poi_id.py | 9 | 2364 | #!/usr/bin/python
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
### Task 1: Select what features you'll use.
### features_list is a list of strings, each of which is a feature name.
### The first feature must be "poi".
features_list = ['poi','salary'] # You will need to use more features
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
data_dict = pickle.load(data_file)
### Task 2: Remove outliers
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
my_dataset = data_dict
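# A commented-out sketch for Tasks 2 and 3 (the 'TOTAL' spreadsheet row and the
# email-count field names below are assumptions about this dataset; adapt them to
# the features you actually explore, and remember to add new ones to features_list):
#
#   data_dict.pop('TOTAL', None)  # aggregate spreadsheet row, not a person
#   for person in data_dict.values():
#       to_poi, sent = person.get('from_this_person_to_poi'), person.get('from_messages')
#       person['fraction_to_poi'] = (float(to_poi) / sent
#                                    if 'NaN' not in (to_poi, sent) else 0.)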
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
### Task 4: Try a varity of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
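# A commented-out sketch of the Pipeline approach mentioned above (illustrative
# only; components and parameters would still need tuning in Task 5):
#
#   from sklearn.pipeline import Pipeline
#   from sklearn.preprocessing import MinMaxScaler
#   from sklearn.decomposition import PCA
#   clf = Pipeline([('scale', MinMaxScaler()),
#                   ('pca', PCA(n_components=2)),
#                   ('clf', GaussianNB())])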
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
from sklearn.cross_validation import train_test_split
features_train, features_test, labels_train, labels_test = \
train_test_split(features, labels, test_size=0.3, random_state=42)
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
dump_classifier_and_data(clf, my_dataset, features_list) | mit |
MJuddBooth/pandas | pandas/tests/series/test_analytics.py | 1 | 58296 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
from distutils.version import LooseVersion
from itertools import product
import operator
import numpy as np
from numpy import nan
import pytest
from pandas.compat import PY2, PY35, is_platform_windows, lrange, range
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, Series, compat, date_range, isna,
notna)
from pandas.api.types import is_scalar
from pandas.core.index import MultiIndex
from pandas.core.indexes.datetimes import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_index_equal,
assert_series_equal)
class TestSeriesAnalytics(object):
def test_describe(self):
s = Series([0, 1, 2, 3, 4], name='int_data')
result = s.describe()
expected = Series([5, 2, s.std(), 0, 1, 2, 3, 4],
name='int_data',
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_series_equal(result, expected)
s = Series([True, True, False, False, False], name='bool_data')
result = s.describe()
expected = Series([5, 2, False, 3], name='bool_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
s = Series(['a', 'a', 'b', 'c', 'd'], name='str_data')
result = s.describe()
expected = Series([5, 4, 'a', 2], name='str_data',
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
def test_describe_with_tz(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
name = str(tz_naive_fixture)
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s = Series(date_range(start, end, tz=tz), name=name)
result = s.describe()
expected = Series(
[5, 5, s.value_counts().index[0], 1, start.tz_localize(tz),
end.tz_localize(tz)
],
name=name,
index=['count', 'unique', 'top', 'freq', 'first', 'last']
)
tm.assert_series_equal(result, expected)
def test_argsort(self, datetime_series):
self._check_accum_op('argsort', datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
assert s.dtype == 'datetime64[ns]'
shifted = s.shift(-1)
assert shifted.dtype == 'datetime64[ns]'
assert isna(shifted[4])
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
tm.assert_series_equal(mindexer, Series(mexpected),
check_dtype=False)
tm.assert_series_equal(qindexer, Series(qexpected),
check_dtype=False)
msg = (r"ndarray Expected type <(class|type) 'numpy\.ndarray'>,"
r" found <class 'pandas\.core\.series\.Series'> instead")
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_cumsum(self, datetime_series):
self._check_accum_op('cumsum', datetime_series)
def test_cumprod(self, datetime_series):
self._check_accum_op('cumprod', datetime_series)
def test_cummin(self, datetime_series):
tm.assert_numpy_array_equal(datetime_series.cummin().values,
np.minimum
.accumulate(np.array(datetime_series)))
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummax(self, datetime_series):
tm.assert_numpy_array_equal(datetime_series.cummax().values,
np.maximum
.accumulate(np.array(datetime_series)))
ts = datetime_series.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.dropna())
tm.assert_series_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1'
]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT', '2000-1-1',
'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(['NaT', '2000-1-2', 'NaT',
'2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3'
]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'1 min', ]))
result = s.cummin(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'1 min',
'1 min',
'1 min', ]))
result = s.cummin(skipna=False)
tm.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'1 min',
'NaT',
'3 min', ]))
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'NaT',
'2 min',
'NaT',
'3 min', ]))
result = s.cummax(skipna=True)
tm.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(['NaT',
'2 min',
'2 min',
'2 min',
'2 min',
'3 min', ]))
result = s.cummax(skipna=False)
tm.assert_series_equal(expected, result)
def test_npdiff(self):
pytest.skip("skipping due to Series no longer being an "
"ndarray")
# no longer works as the return type of np.diff is now np.ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_accum_op(self, name, datetime_series_, check_dtype=True):
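# compare a numpy accumulation ufunc applied to the Series against the same
# ufunc on the raw ndarray, both with and without missing values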
func = getattr(np, name)
tm.assert_numpy_array_equal(func(datetime_series_).values,
func(np.array(datetime_series_)),
check_dtype=check_dtype)
# with missing values
ts = datetime_series_.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected,
check_dtype=False)
def test_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
with tm.assert_produces_warning(FutureWarning):
result = s.compress(cond)
tm.assert_series_equal(result, expected)
def test_numpy_compress(self):
cond = [True, False, True, False, False]
s = Series([1, -1, 5, 8, 7],
index=list('abcde'), name='foo')
expected = Series(s.values.compress(cond),
index=list('ac'), name='foo')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(np.compress(cond, s), expected)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.compress(cond, s, axis=1)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.compress(cond, s, out=s)
def test_round(self, datetime_series):
datetime_series.index.name = "index_name"
result = datetime_series.round(2)
expected = Series(np.round(datetime_series.values, 2),
index=datetime_series.index, name='ts')
assert_series_equal(result, expected)
assert result.name == datetime_series.name
def test_numpy_round(self):
# See gh-12600
s = Series([1.53, 1.36, 0.06])
out = np.round(s, decimals=0)
expected = Series([2., 1., 0.])
assert_series_equal(out, expected)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(s, decimals=0, out=s)
@pytest.mark.xfail(
PY2 and is_platform_windows(), reason="numpy/numpy#7882",
raises=AssertionError, strict=True)
def test_numpy_round_nan(self):
# See gh-14197
s = Series([1.53, np.nan, 0.06])
with tm.assert_produces_warning(None):
result = s.round()
expected = Series([2., np.nan, 0.])
assert_series_equal(result, expected)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip(
'build in round cannot be overridden prior to Python 3')
s = Series([1.123, 2.123, 3.123], index=lrange(3))
result = round(s)
expected_rounded0 = Series([1., 2., 3.], index=lrange(3))
tm.assert_series_equal(result, expected_rounded0)
decimals = 2
expected_rounded = Series([1.12, 2.12, 3.12], index=lrange(3))
result = round(s, decimals)
tm.assert_series_equal(result, expected_rounded)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
assert not isinstance(result, Series)
@td.skip_if_no_scipy
def test_corr(self, datetime_series):
import scipy.stats as stats
# full overlap
tm.assert_almost_equal(datetime_series.corr(datetime_series), 1)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]),
1)
assert isna(datetime_series[:15].corr(datetime_series[5:],
min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.corr(ts2, min_periods=12))
# No overlap
assert np.isnan(datetime_series[::2].corr(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.corr(cp))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
tm.assert_almost_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
tm.assert_almost_equal(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
tm.assert_almost_equal(result, expected)
# these methods got rewritten in 0.8
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
pytest.skip("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series(
[-0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -
0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606])
B = Series(
[-1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292,
1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
tm.assert_almost_equal(A.corr(B, method='kendall'), kexp)
tm.assert_almost_equal(A.corr(B, method='spearman'), sexp)
def test_corr_invalid_method(self):
# GH PR #22298
s1 = pd.Series(np.random.randn(10))
s2 = pd.Series(np.random.randn(10))
msg = ("method must be either 'pearson', 'spearman', "
"or 'kendall'")
with pytest.raises(ValueError, match=msg):
s1.corr(s2, method="____")
def test_corr_callable_method(self, datetime_series):
# simple correlation example
# returns 1 if exact equality, 0 otherwise
my_corr = lambda a, b: 1. if (a == b).all() else 0.
# simple example
s1 = Series([1, 2, 3, 4, 5])
s2 = Series([5, 4, 3, 2, 1])
expected = 0
tm.assert_almost_equal(
s1.corr(s2, method=my_corr),
expected)
# full overlap
tm.assert_almost_equal(datetime_series.corr(
datetime_series, method=my_corr), 1.)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].corr(
datetime_series[5:], method=my_corr), 1.)
# No overlap
assert np.isnan(datetime_series[::2].corr(
datetime_series[1::2], method=my_corr))
# dataframe example
df = pd.DataFrame([s1, s2])
expected = pd.DataFrame([
{0: 1., 1: 0}, {0: 0, 1: 1.}])
tm.assert_almost_equal(
df.transpose().corr(method=my_corr), expected)
def test_cov(self, datetime_series):
# full overlap
tm.assert_almost_equal(datetime_series.cov(datetime_series),
datetime_series.std() ** 2)
# partial overlap
tm.assert_almost_equal(datetime_series[:15].cov(datetime_series[5:]),
datetime_series[5:15].std() ** 2)
# No overlap
assert np.isnan(datetime_series[::2].cov(datetime_series[1::2]))
# all NA
cp = datetime_series[:10].copy()
cp[:] = np.nan
assert isna(cp.cov(cp))
# min_periods
assert isna(datetime_series[:15].cov(datetime_series[5:],
min_periods=12))
ts1 = datetime_series[:15].reindex(datetime_series.index)
ts2 = datetime_series[5:].reindex(datetime_series.index)
assert isna(ts1.cov(ts2, min_periods=12))
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
datetime_series[::2] = np.NaN
assert datetime_series.count() == np.isfinite(datetime_series).sum()
mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, nan])
assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = nan
assert_series_equal(ts.count(level=1), right - 1)
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
assert np.all(result == expected.values)
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
msg = r"Dot product shape mismatch, \(4L?,\) vs \(3L?,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
def test_matmul(self):
# matmul test is for GH #10259
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
# Series @ DataFrame
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=['1', '2', '3'])
assert_series_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Series @ Series
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D np.array) @ Series (__rmatmul__)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# vector (1D list) @ Series (__rmatmul__)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D np.array) @ Series (__rmatmul__)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# GH 21530
# matrix (2D nested lists) @ Series (__rmatmul__)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
assert_almost_equal(result, expected)
# mixed dtype DataFrame @ Series
a['p'] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# different dtypes DataFrame @ Series
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
def test_clip(self, datetime_series):
val = datetime_series.median()
with tm.assert_produces_warning(FutureWarning):
assert datetime_series.clip_lower(val).min() == val
with tm.assert_produces_warning(FutureWarning):
assert datetime_series.clip_upper(val).max() == val
assert datetime_series.clip(lower=val).min() == val
assert datetime_series.clip(upper=val).max() == val
result = datetime_series.clip(-0.5, 0.5)
expected = np.clip(datetime_series, -0.5, 0.5)
assert_series_equal(result, expected)
assert isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime(
[np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
with tm.assert_produces_warning(FutureWarning):
lower = s.clip_lower(thresh)
with tm.assert_produces_warning(FutureWarning):
upper = s.clip_upper(thresh)
assert lower[notna(lower)].min() == thresh
assert upper[notna(upper)].max() == thresh
assert list(isna(s)) == list(isna(lower))
assert list(isna(s)) == list(isna(upper))
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
s = Series([1, 2, 3])
assert_series_equal(s.clip(np.nan), Series([1, 2, 3]))
assert_series_equal(s.clip(upper=np.nan, lower=np.nan),
Series([1, 2, 3]))
# GH #19992
assert_series_equal(s.clip(lower=[0, 4, np.nan]),
Series([1, 4, np.nan]))
assert_series_equal(s.clip(upper=[1, np.nan, 1]),
Series([1, np.nan, 1]))
def test_clip_against_series(self):
# GH #6966
s = Series([1.0, 1.0, 4.0])
threshold = Series([1.0, 2.0, 3.0])
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.clip_lower(threshold),
Series([1.0, 2.0, 4.0]))
with tm.assert_produces_warning(FutureWarning):
assert_series_equal(s.clip_upper(threshold),
Series([1.0, 1.0, 3.0]))
lower = Series([1.0, 2.0, 3.0])
upper = Series([1.5, 2.5, 3.5])
assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("upper", [[1, 2, 3], np.asarray([1, 2, 3])])
def test_clip_against_list_like(self, inplace, upper):
# GH #15390
original = pd.Series([5, 6, 7])
result = original.clip(upper=upper, inplace=inplace)
expected = pd.Series([1, 2, 3])
if inplace:
result = original
tm.assert_series_equal(result, expected, check_exact=True)
def test_clip_with_datetimes(self):
# GH 11838
# naive and tz-aware datetimes
t = Timestamp('2015-12-01 09:30:30')
s = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:31:00')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00'),
Timestamp('2015-12-01 09:30:30')])
assert_series_equal(result, expected)
t = Timestamp('2015-12-01 09:30:30', tz='US/Eastern')
s = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:31:00', tz='US/Eastern')])
result = s.clip(upper=t)
expected = Series([Timestamp('2015-12-01 09:30:00', tz='US/Eastern'),
Timestamp('2015-12-01 09:30:30', tz='US/Eastern')])
assert_series_equal(result, expected)
def test_cummethods_bool(self):
# GH 6270
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum,
'cumprod': np.cumprod,
'cummin': np.minimum.accumulate,
'cummax': np.maximum.accumulate}
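# each pandas cumulative method should match the corresponding numpy
# accumulation when applied to plain boolean data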
args = product((a, b, c, d), methods)
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse,
'cumprod': cpe,
'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
# GH: 16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
s = Series(list('abcdefghijk' * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, 'a', 'b', 'G', 'Y', 'Z', 'E',
'K', 'E', 'S', 'I', 'R', 'R'] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
msg = (r"only list-like objects are allowed to be passed to isin\(\),"
r" you passed a \[str\]")
with pytest.raises(TypeError, match=msg):
s.isin('a')
s = Series(['aaa', 'b', 'c'])
with pytest.raises(TypeError, match=msg):
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range('jan-01-2013', 'jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5), unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_ptp(self):
# GH21614
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert np.ptp(ser) == np.ptp(arr)
# GH11163
s = Series([3, 5, np.nan, -3, 10])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert s.ptp() == 13
assert pd.isna(s.ptp(skipna=False))
mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0), expected)
expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
tm.assert_series_equal(s.ptp(level=0, skipna=False), expected)
msg = r"No axis named 1 for object type <(class|type) 'type'>"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp(axis=1)
s = pd.Series(['a', 'b', 'c', 'd', 'e'])
msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
with pytest.raises(TypeError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp()
msg = r"Series\.ptp does not implement numeric_only\."
with pytest.raises(NotImplementedError, match=msg):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s.ptp(numeric_only=True)
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name='x')
expected = Series(s.values.repeat(2), name='x',
index=s.index.values.repeat(2))
assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(s, 2, axis=0)
def test_searchsorted(self):
s = Series([1, 2, 3])
result = s.searchsorted(1, side='left')
assert is_scalar(result)
assert result == 0
result = s.searchsorted(1, side='right')
assert is_scalar(result)
assert result == 1
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
assert is_scalar(r)
assert r == 2
r = s.searchsorted([30])
e = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
assert is_scalar(r)
assert r == 1
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(r, e)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing is True
s = Series(pd.date_range('20130101', periods=10))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(list(reversed(s.tolist())))
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sort_index(level='A')
assert_series_equal(backwards, res)
res = s.sort_index(level=['A', 'B'])
assert_series_equal(backwards, res)
res = s.sort_index(level='A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sort_index(level=['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_apply_categorical(self):
values = pd.Categorical(list('ABBABCD'), categories=list('DCBA'),
ordered=True)
s = pd.Series(values, name='XX', index=list('abcdefg'))
result = s.apply(lambda x: x.lower())
# should be categorical dtype when the number of categories is
# the same
values = pd.Categorical(list('abbabcd'), categories=list('dcba'),
ordered=True)
exp = pd.Series(values, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = s.apply(lambda x: 'A')
exp = pd.Series(['A'] * 7, name='XX', index=list('abcdefg'))
tm.assert_series_equal(result, exp)
assert result.dtype == np.object
def test_shift_int(self, datetime_series):
ts = datetime_series.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_shift_categorical(self):
# GH 9416
s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')
assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).dropna())
sp1 = s.shift(1)
assert_index_equal(s.index, sp1.index)
assert np.all(sp1.values.codes[:1] == -1)
assert np.all(s.values.codes[:-1] == sp1.values.codes[1:])
sn2 = s.shift(-2)
assert_index_equal(s.index, sn2.index)
assert np.all(sn2.values.codes[-2:] == -1)
assert np.all(s.values.codes[2:] == sn2.values.codes[:-2])
assert_index_equal(s.values.categories, sp1.values.categories)
assert_index_equal(s.values.categories, sn2.values.categories)
def test_unstack(self):
from numpy import nan
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
codes=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values},
index=exp_index).sort_index(level=0)
unstacked = s.unstack(0).sort_index()
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1, 2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'
], ['a', 'a', 'b', 'a', 'b'],
[1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_value_counts_datetime(self):
# most dtypes are tested in test_base.py
values = [pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 11:00')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'])
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_datetime_tz(self):
values = [pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 09:00', tz='US/Eastern'),
pd.Timestamp('2011-01-01 11:00', tz='US/Eastern')]
exp_idx = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 11:00',
'2011-01-01 10:00'], tz='US/Eastern')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
idx = pd.DatetimeIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_period(self):
values = [pd.Period('2011-01', freq='M'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-01', freq='M'),
pd.Period('2011-03', freq='M')]
exp_idx = pd.PeriodIndex(['2011-01', '2011-03', '2011-02'], freq='M')
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check DatetimeIndex outputs the same result
idx = pd.PeriodIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_ordered(self):
# most dtypes are tested in test_base.py
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=True)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=True)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
def test_value_counts_categorical_not_ordered(self):
values = pd.Categorical([1, 2, 3, 1, 1, 3], ordered=False)
exp_idx = pd.CategoricalIndex([1, 3, 2], categories=[1, 2, 3],
ordered=False)
exp = pd.Series([3, 2, 1], index=exp_idx, name='xxx')
s = pd.Series(values, name='xxx')
tm.assert_series_equal(s.value_counts(), exp)
# check CategoricalIndex outputs the same result
idx = pd.CategoricalIndex(values, name='xxx')
tm.assert_series_equal(idx.value_counts(), exp)
# normalize
exp = pd.Series(np.array([3., 2., 1]) / 6.,
index=exp_idx, name='xxx')
tm.assert_series_equal(s.value_counts(normalize=True), exp)
tm.assert_series_equal(idx.value_counts(normalize=True), exp)
@pytest.mark.parametrize("func", [np.any, np.all])
@pytest.mark.parametrize("kwargs", [
dict(keepdims=True),
dict(out=object()),
])
@td.skip_if_np_lt_115
def test_validate_any_all_out_keepdims_raises(self, kwargs, func):
s = pd.Series([1, 2])
param = list(kwargs)[0]
name = func.__name__
msg = (r"the '{arg}' parameter is not "
r"supported in the pandas "
r"implementation of {fname}\(\)").format(arg=param, fname=name)
with pytest.raises(ValueError, match=msg):
func(s, **kwargs)
@td.skip_if_np_lt_115
def test_validate_sum_initial(self):
s = pd.Series([1, 2])
msg = (r"the 'initial' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)")
with pytest.raises(ValueError, match=msg):
np.sum(s, initial=10)
def test_validate_median_initial(self):
s = pd.Series([1, 2])
msg = (r"the 'overwrite_input' parameter is not "
r"supported in the pandas "
r"implementation of median\(\)")
with pytest.raises(ValueError, match=msg):
# It seems like np.median doesn't dispatch, so we use the
# method instead of the ufunc.
s.median(overwrite_input=True)
@td.skip_if_np_lt_115
def test_validate_stat_keepdims(self):
s = pd.Series([1, 2])
msg = (r"the 'keepdims' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)")
with pytest.raises(ValueError, match=msg):
np.sum(s, keepdims=True)
main_dtypes = [
'datetime',
'datetimetz',
'timedelta',
'int8',
'int16',
'int32',
'int64',
'float32',
'float64',
'uint8',
'uint16',
'uint32',
'uint64'
]
@pytest.fixture
def s_main_dtypes():
"""A DataFrame with many dtypes
* datetime
* datetimetz
* timedelta
* [u]int{8,16,32,64}
* float{32,64}
The columns are the name of the dtype.
"""
df = pd.DataFrame(
{'datetime': pd.to_datetime(['2003', '2002',
'2001', '2002',
'2005']),
'datetimetz': pd.to_datetime(
['2003', '2002',
'2001', '2002',
'2005']).tz_localize('US/Eastern'),
'timedelta': pd.to_timedelta(['3d', '2d', '1d',
'2d', '5d'])})
for dtype in ['int8', 'int16', 'int32', 'int64',
'float32', 'float64',
'uint8', 'uint16', 'uint32', 'uint64']:
df[dtype] = Series([3, 2, 1, 2, 5], dtype=dtype)
return df
@pytest.fixture(params=main_dtypes)
def s_main_dtypes_split(request, s_main_dtypes):
"""Each series in s_main_dtypes."""
return s_main_dtypes[request.param]
def assert_check_nselect_boundary(vals, dtype, method):
# helper function for 'test_boundary_{dtype}' tests
s = Series(vals, dtype=dtype)
result = getattr(s, method)(3)
expected_idxr = [0, 1, 2] if method == 'nsmallest' else [3, 2, 1]
expected = s.loc[expected_idxr]
tm.assert_series_equal(result, expected)
class TestNLargestNSmallest(object):
@pytest.mark.parametrize(
"r", [Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
Series(list('abcde')),
Series(list('abcde'), dtype='category')])
def test_error(self, r):
dt = r.dtype
msg = ("Cannot use method 'n(larg|small)est' with "
"dtype {dt}".format(dt=dt))
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with pytest.raises(TypeError, match=msg):
method(arg)
def test_nsmallest_nlargest(self, s_main_dtypes_split):
# float, int, datetime64 (use i8), timedelta64 (same),
# objects that are numbers, objects that are strings
s = s_main_dtypes_split
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.sort_values())
assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
def test_misc(self):
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
msg = 'keep must be either "first", "last"'
with pytest.raises(ValueError, match=msg):
s.nsmallest(keep='invalid')
with pytest.raises(ValueError, match=msg):
s.nlargest(keep='invalid')
# GH 15297
s = Series([1] * 5, index=[1, 2, 3, 4, 5])
expected_first = Series([1] * 3, index=[1, 2, 3])
expected_last = Series([1] * 3, index=[5, 4, 3])
result = s.nsmallest(3)
assert_series_equal(result, expected_first)
result = s.nsmallest(3, keep='last')
assert_series_equal(result, expected_last)
result = s.nlargest(3)
assert_series_equal(result, expected_first)
result = s.nlargest(3, keep='last')
assert_series_equal(result, expected_last)
@pytest.mark.parametrize('n', range(1, 5))
def test_n(self, n):
# GH 13412
s = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
result = s.nlargest(n)
expected = s.sort_values(ascending=False).head(n)
assert_series_equal(result, expected)
result = s.nsmallest(n)
expected = s.sort_values().head(n)
assert_series_equal(result, expected)
def test_boundary_integer(self, nselect_method, any_int_dtype):
# GH 21426
dtype_info = np.iinfo(any_int_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val, min_val + 1, max_val - 1, max_val]
assert_check_nselect_boundary(vals, any_int_dtype, nselect_method)
def test_boundary_float(self, nselect_method, float_dtype):
# GH 21426
dtype_info = np.finfo(float_dtype)
min_val, max_val = dtype_info.min, dtype_info.max
min_2nd, max_2nd = np.nextafter(
[min_val, max_val], 0, dtype=float_dtype)
vals = [min_val, min_2nd, max_2nd, max_val]
assert_check_nselect_boundary(vals, float_dtype, nselect_method)
@pytest.mark.parametrize('dtype', ['datetime64[ns]', 'timedelta64[ns]'])
def test_boundary_datetimelike(self, nselect_method, dtype):
# GH 21426
# use int64 bounds and +1 to min_val since true minimum is NaT
# (include min_val/NaT at end to maintain same expected_idxr)
dtype_info = np.iinfo('int64')
min_val, max_val = dtype_info.min, dtype_info.max
vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
assert_check_nselect_boundary(vals, dtype, nselect_method)
def test_duplicate_keep_all_ties(self):
# see gh-16818
s = Series([10, 9, 8, 7, 7, 7, 7, 6])
result = s.nlargest(4, keep='all')
expected = Series([10, 9, 8, 7, 7, 7, 7])
assert_series_equal(result, expected)
result = s.nsmallest(2, keep='all')
expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
assert_series_equal(result, expected)
class TestCategoricalSeriesAnalytics(object):
def test_count(self):
s = Series(Categorical([np.nan, 1, 2, np.nan],
categories=[5, 4, 3, 2, 1], ordered=True))
result = s.count()
assert result == 2
def test_value_counts(self):
# GH 12835
cats = Categorical(list('abcccb'), categories=list('cabd'))
s = Series(cats, name='xxx')
res = s.value_counts(sort=False)
exp_index = CategoricalIndex(list('cabd'), categories=cats.categories)
exp = Series([3, 1, 2, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
res = s.value_counts(sort=True)
exp_index = CategoricalIndex(list('cbad'), categories=cats.categories)
exp = Series([3, 2, 1, 0], name='xxx', index=exp_index)
tm.assert_series_equal(res, exp)
# check that object dtype handles the Series.name the same way
# (tested in test_base.py)
s = Series(["a", "b", "c", "c", "c", "b"], name='xxx')
res = s.value_counts()
exp = Series([3, 2, 1], name='xxx', index=["c", "b", "a"])
tm.assert_series_equal(res, exp)
def test_value_counts_with_nan(self):
# see gh-9443
# sanity check
s = Series(["a", "b", "a"], dtype="category")
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# same Series via two different constructions --> same behaviour
series = [
Series(["a", "b", None, "a", None, None], dtype="category"),
Series(Categorical(["a", "b", None, "a", None, None],
categories=["a", "b"]))
]
for s in series:
# None is a NaN value, so we exclude its count here
exp = Series([2, 1], index=CategoricalIndex(["a", "b"]))
res = s.value_counts(dropna=True)
tm.assert_series_equal(res, exp)
# we don't exclude the count of None and sort by counts
exp = Series([3, 2, 1], index=CategoricalIndex([np.nan, "a", "b"]))
res = s.value_counts(dropna=False)
tm.assert_series_equal(res, exp)
# When we aren't sorting by counts, and np.nan isn't a
# category, it should be last.
exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan]))
res = s.value_counts(dropna=False, sort=False)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize(
"dtype",
["int_", "uint", "float_", "unicode_", "timedelta64[h]",
pytest.param("datetime64[D]",
marks=pytest.mark.xfail(reason="GH#7996"))]
)
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_non_bool(self, dtype, is_ordered):
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
# Test case 1
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
tc1 = Series(Categorical(input1, categories=cat_array,
ordered=is_ordered))
expected = Series([False, False, False, True])
tm.assert_series_equal(tc1.duplicated(), expected)
tm.assert_series_equal(tc1.drop_duplicates(), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, False])
tm.assert_series_equal(tc1.duplicated(keep='last'), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep='last'),
tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
expected = Series([False, False, True, True])
tm.assert_series_equal(tc1.duplicated(keep=False), expected)
tm.assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected])
sc = tc1.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc1[~expected])
# Test case 2
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
tc2 = Series(Categorical(
input2, categories=cat_array, ordered=is_ordered)
)
expected = Series([False, False, False, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(), expected)
tm.assert_series_equal(tc2.drop_duplicates(), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, False, False, False])
tm.assert_series_equal(tc2.duplicated(keep='last'), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep='last'),
tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
expected = Series([False, True, True, False, True, True, False])
tm.assert_series_equal(tc2.duplicated(keep=False), expected)
tm.assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected])
sc = tc2.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc2[~expected])
@pytest.mark.parametrize("is_ordered", [True, False])
def test_drop_duplicates_categorical_bool(self, is_ordered):
tc = Series(Categorical([True, False, True, False],
categories=[True, False], ordered=is_ordered))
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
tm.assert_series_equal(tc.duplicated(keep='last'), expected)
tm.assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep='last', inplace=True)
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
tm.assert_series_equal(tc.duplicated(keep=False), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
sc.drop_duplicates(keep=False, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
| bsd-3-clause |
shyamalschandra/scikit-learn | sklearn/utils/tests/test_utils.py | 35 | 9000 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from scipy.linalg import eigh
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex,
assert_greater_equal)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.arpack import eigsh
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.graph import graph_laplacian
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_arpack_eigsh_initialization():
# Non-regression test that shows null-space computation is better with
# initialization of eigsh from [-1,1] instead of [0,1]
random_state = check_random_state(42)
A = random_state.rand(50, 50)
A = np.dot(A.T, A) # create s.p.d. matrix
A = graph_laplacian(A) + 1e-7 * np.identity(A.shape[0])
k = 5
# Test if eigsh is working correctly
# New initialization [-1,1] (as in original ARPACK)
# Was [0,1] before, with which this test could fail
v0 = random_state.uniform(-1, 1, A.shape[0])
w, _ = eigsh(A, k=k, sigma=0.0, v0=v0)
# Eigenvalues of s.p.d. matrix should be nonnegative, w[0] is smallest
assert_greater_equal(w[0], 0)
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
# Check that shuffle does not try to convert to numpy arrays with float
# dtypes and can let any indexable data structure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[sl] for sl in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/linear_model/randomized_l1.py | 68 | 23405 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
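# each resampling fits the estimator on a random subset of the samples with
# randomly down-weighted features; summing the boolean active sets and
# dividing by n_resampling yields a per-feature selection frequency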
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
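# down-weighting a column by (1 - weights) is equivalent to inflating its
# l1 penalty, i.e. the random penalty re-weighting of stability selection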
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the training data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article, which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
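>>> # illustrative only -- X and y stand in for your own feature matrix
>>> # and target:  randomized_lasso.fit(X, y); randomized_lasso.scores_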
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
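A minimal sketch on a small synthetic classification problem (illustrative
only; the binary labels below are an assumption, not data shipped with
scikit-learn):
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(60, 4)
>>> y = (X[:, 0] > 0).astype(int)
>>> randomized_logistic = RandomizedLogisticRegression(random_state=42).fit(X, y)
>>> randomized_logistic.scores_.shape
(4,)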
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
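Examples
--------
A minimal sketch on synthetic data (illustrative only; only the shape of the
returned score path is checked):
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> X = rng.randn(40, 8)
>>> y = X[:, 0] + 0.1 * rng.randn(40)
>>> alphas_grid, scores_path = lasso_stability_path(X, y, random_state=0)
>>> scores_path.shape[0]
8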
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
adamrp/qiime | qiime/group.py | 15 | 35019 | #!/usr/bin/env python
"""This module contains functions useful for obtaining groupings."""
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jai Ram Rideout",
"Greg Caporaso",
"Jeremy Widmann"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
from collections import defaultdict
from functools import partial
import pandas as pd
import numpy as np
# needed for the UnknownAxisError raised in _collapse_to_random below
from biom.exception import UnknownAxisError
from qiime.stats import is_symmetric_and_hollow
from qiime.parse import group_by_field, parse_mapping_file
from qiime.filter import filter_mapping_file
def get_grouped_distances(dist_matrix_header, dist_matrix, mapping_header,
mapping, field, within=True,
suppress_symmetry_and_hollowness_check=False):
"""Returns a list of distance groupings for the specified field.
The return value is a list that contains tuples of three elements: the
first two elements are the field values being compared, and the third
element is a list of the distances.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
- dist_matrix_header: The distance matrix header, obtained from
parse.parse_distmat()
- dist_matrix: The distance matrix, obtained from
parse.parse_distmat().
- mapping_header: The mapping file header, obtained from
parse.parse_mapping_file()
- mapping: The mapping file's contents, obtained from
parse.parse_mapping_file()
- field: A field in the mapping file to do the grouping on.
- within: If True, distances are grouped within a field value. If
False, distances are grouped between field values.
- suppress_symmetry_and_hollowness_check: By default, the input
distance matrix will be checked for symmetry and hollowness. It is
recommended to leave this check in place for safety, as the check
is fairly fast. However, if you *know* you have a symmetric and
hollow distance matrix, you can disable this check for small
performance gains on extremely large distance matrices
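A minimal sketch with hypothetical inputs (the order of the returned list
depends on dictionary ordering, so it is sorted here):
>>> import numpy as np
>>> dm_header = ['s1', 's2', 's3', 's4']
>>> dm = np.array([[0.0, 0.25, 0.75, 0.5],
...                [0.25, 0.0, 0.5, 0.75],
...                [0.75, 0.5, 0.0, 0.25],
...                [0.5, 0.75, 0.25, 0.0]])
>>> map_header = ['SampleID', 'Treatment']
>>> mapping = [['s1', 'Fast'], ['s2', 'Fast'],
...            ['s3', 'Control'], ['s4', 'Control']]
>>> sorted(get_grouped_distances(dm_header, dm, map_header, mapping,
...                              'Treatment'))
[('Control', 'Control', [0.25]), ('Fast', 'Fast', [0.25])]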
"""
_validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping,
field)
mapping_data = [mapping_header]
mapping_data.extend(mapping)
groups = group_by_field(mapping_data, field)
return _get_groupings(dist_matrix_header, dist_matrix, groups, within,
suppress_symmetry_and_hollowness_check)
def get_all_grouped_distances(dist_matrix_header, dist_matrix, mapping_header,
mapping, field, within=True,
suppress_symmetry_and_hollowness_check=False):
"""Returns a list of distances for either samples within each of the
field values or between each of the field values for the specified field.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
- dist_matrix_header: The distance matrix header, obtained from
parse.parse_distmat()
- dist_matrix: The distance matrix, obtained from
parse.parse_distmat().
- mapping_header: The mapping file header, obtained from
parse.parse_mapping_file()
- mapping: The mapping file's contents, obtained from
parse.parse_mapping_file()
- field: A field in the mapping file to do the grouping on.
- within: If True, distances are grouped within a field value. If
False, distances are grouped between field values.
- suppress_symmetry_and_hollowness_check: By default, the input
distance matrix will be checked for symmetry and hollowness. It is
recommended to leave this check in place for safety, as the check
is fairly fast. However, if you *know* you have a symmetric and
hollow distance matrix, you can disable this check for small
performance gains on extremely large distance matrices
"""
distances = get_grouped_distances(dist_matrix_header, dist_matrix,
mapping_header, mapping, field, within,
suppress_symmetry_and_hollowness_check)
results = []
for group in distances:
for distance in group[2]:
results.append(distance)
return results
def get_field_state_comparisons(dist_matrix_header, dist_matrix,
mapping_header, mapping, field,
comparison_field_states,
suppress_symmetry_and_hollowness_check=False):
"""Returns a 2D dictionary relating distances between field states.
The 2D dictionary is constructed such that each top-level key is a field
state other than the field states in comparison_field_states. The
second-level key is a field state from comparison_field_states, and the
value at the (key, key) index is a list of distances between those two
field states. Thus, given a field, this function will create comparisons
between the specified comparison_field_states and all other field states.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
- dist_matrix_header: The distance matrix header, obtained from
parse.parse_distmat()
- dist_matrix: The distance matrix, obtained from
parse.parse_distmat().
- mapping_header: The mapping file header, obtained from
parse.parse_mapping_file()
- mapping: The mapping file's contents, obtained from
parse.parse_mapping_file()
- field: A field in the mapping file to do the comparisons on.
- comparison_field_states: A list of strings specifying the field
states to compare to all other field states. Cannot be an empty list.
- suppress_symmetry_and_hollowness_check: By default, the input
distance matrix will be checked for symmetry and hollowness. It is
recommended to leave this check in place for safety, as the check
is fairly fast. However, if you *know* you have a symmetric and
hollow distance matrix, you can disable this check for small
performance gains on extremely large distance matrices
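For example (hypothetical field and values), comparing every other state of
a 'Treatment' field against comparison_field_states=['Control'] might yield
a structure like:
{'Fast': {'Control': [0.75, 0.5, 0.5, 0.75]}}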
"""
_validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping,
field)
# avoid empty groups of distances
mapping_header, mapping = filter_mapping_file(mapping, mapping_header,
dist_matrix_header)
# Make sure each comparison group field state is in the specified field.
if not comparison_field_states:
raise ValueError("You must provide at least one field state to "
"compare to all of the other field states.")
mapping_data = [mapping_header]
mapping_data.extend(mapping)
groups = group_by_field(mapping_data, field)
for field_state in comparison_field_states:
if field_state not in groups:
raise ValueError("The comparison group field state '%s' is not in "
"the provided mapping file's field '%s'."
% (field_state, field))
# Grab a list of all other field states (besides the ones in
# comparison_field_states). These will be the field states that the states
# in comparison_field_states will be compared against.
field_states = [group for group in groups.keys()
if group not in comparison_field_states]
# Get between distance groupings for the field of interest.
between_groupings = get_grouped_distances(dist_matrix_header, dist_matrix,
mapping_header, mapping, field, within=False,
suppress_symmetry_and_hollowness_check=
suppress_symmetry_and_hollowness_check)
# Build up our 2D dictionary giving the distances between a field state and
# a comparison group field state by filtering out the between_groupings
# list to include only the comparisons that we want.
result = {}
for field_state in field_states:
result[field_state] = {}
for comp_field_state in comparison_field_states:
result[field_state][comp_field_state] = []
for group in between_groupings:
if ((group[0] == field_state or group[1] == field_state)
and (group[0] == comp_field_state or
group[1] == comp_field_state)):
# We've found a group of distances between our comparison
# field state and the current field state, so keep the
# data.
result[field_state][comp_field_state] = group[2]
return result
def get_ordered_coordinates(coordinate_header,
coordinate_matrix,
order,
strict=False):
""" Return coordinate vectors in order
coordinate_header: ids corresponding to vectors
in coordinate_matrix (element 0 of output of
qiime.parse.parse_coords)
coordinate_matrix: the coordinate vectors (element 1 of
output of qiime.parse.parse_coords)
order: ordered ids from coordinate_header (usually sample
ids) for coordinates that should be extracted
strict: raise an error if an id from order is not present
in coordinate_header (default: that id is ignored)
The output of this function will be a tuple of the coordinate
vectors corresponding to each id in order, and the id order:
(ordered_coordinates, ordered_ids)
Note that the output order can be a subset of the input order
if some ids from order are not present in coordinate_header
and strict == False.
This function can be used in a way analogous to
get_adjacent_distances to get a set of coordinates that
might be connected by a line, for example.
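A minimal sketch with hypothetical inputs ('s99' is ignored because it is
absent from the header and strict is False):
>>> header = ['s1', 's2', 's3']
>>> coords = [[0.0, 0.5], [1.0, 1.5], [2.0, 2.5]]
>>> get_ordered_coordinates(header, coords, ['s3', 's1', 's99'])
([[2.0, 2.5], [0.0, 0.5]], ['s3', 's1'])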
"""
ordered_coordinates = []
ordered_ids = []
for o in order:
try:
coordinate_idx = coordinate_header.index(o)
except ValueError:
if strict:
raise ValueError(
"ID (%s) is not present in coordinate matrix" %
o)
else:
pass
else:
ordered_coordinates.append(coordinate_matrix[coordinate_idx])
ordered_ids.append(o)
return ordered_coordinates, ordered_ids
def get_adjacent_distances(dist_matrix_header,
dist_matrix,
sample_ids,
strict=False):
"""Return the distances between the adjacent sample_ids as a list
dist_matrix_header: distance matrix headers, e.g. the output
of qiime.parse.parse_distmat (element 0)
dist_matrix: distance matrix, e.g., the output of
qiime.parse.parse_distmat (element 1)
sample_ids: a list of sample ids
strict: boolean indicating whether to raise ValueError if a
sample_id is not in dm (default: False; sample_ids not in
dm are ignored)
The output of this function will be a list of the distances
between the adjacent sample_ids, and a list of the pair of sample ids
corresponding to each distance. This could subsequently be used, for
example, to plot unifrac distances between days in a timeseries, as
d1 to d2, d2 to d3, d3 to d4, and so on. The list of pairs of sample
ids are useful primarily in labeling axes when strict=False
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
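A minimal sketch with hypothetical inputs:
>>> dm_header = ['s1', 's2', 's3']
>>> dm = [[0.0, 0.5, 0.75],
...       [0.5, 0.0, 0.25],
...       [0.75, 0.25, 0.0]]
>>> get_adjacent_distances(dm_header, dm, ['s1', 's2', 's3'])
([0.5, 0.25], [('s1', 's2'), ('s2', 's3')])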
"""
filtered_idx = []
filtered_sids = []
for sid in sample_ids:
try:
idx = dist_matrix_header.index(sid)
except ValueError:
if strict:
raise ValueError(
"Sample ID (%s) is not present in distance matrix" %
sid)
else:
pass
else:
filtered_idx.append(idx)
filtered_sids.append(sid)
if len(filtered_idx) < 2:
raise ValueError("At least two of your sample_ids must be present in the"
" distance matrix. %d are present." % len(filtered_idx))
distance_results = []
header_results = []
for i in range(len(filtered_idx) - 1):
distance_results.append(
dist_matrix[filtered_idx[i]][filtered_idx[i + 1]])
header_results.append(
(filtered_sids[i], filtered_sids[i + 1]))
return distance_results, header_results
def _group_by_sample_metadata(collapsed_md, sample_id_field="SampleID"):
"""Group sample identifiers by one or more metadata fields
Parameters
----------
collapsed_md : pd.DataFrame
The result of collapsing a sample metadata DataFrame, for example with
collapse_metadata.
sample_id_field : str, optional
The sample id field in the mapping_f.
Returns
-------
dict
Mapping of group id to set of input sample ids in that group.
dict
Mapping of input sample id to new group id.
pd.DataFrame
Sample metadata resulting from the collapse operation.
Raises
------
KeyError
If sample_id_field or any of the collapse fields are not column headers
in mapping_f.
"""
new_index_to_group = {}
old_index_to_new_index = {}
for i in collapsed_md.index:
old_indices = collapsed_md[sample_id_field][i]
# this is a little ugly, but we need to handle single and multi-index
# values here, and we always want to result to be a tuple
if isinstance(i, tuple):
new_index = i
else:
new_index = (i, )
new_index_to_group[new_index] = set(old_indices)
for old_index in old_indices:
old_index_to_new_index[old_index] = new_index
return new_index_to_group, old_index_to_new_index
def get_collapse_fns():
""" Return lookup of functions that can be used with biom.Table.collapse
"""
return {'median': _collapse_to_median,
'first': _collapse_to_first,
'random': _collapse_to_random,
'sum': _collapse_to_sum,
'mean': _collapse_to_mean}
def collapse_samples(table, mapping_f, collapse_fields, collapse_mode):
""" Collapse samples in a biom table and sample metadata
Parameters
----------
table : biom.Table
The biom table to be collapsed.
mapping_f : file handle or filepath
The sample metadata mapping file.
collapse_fields : iterable
The fields to combine when collapsing samples. For each sample in the
mapping_f, the ordered values from these columns will be tuplized and
used as the group identifier. Samples whose tuplized values in these
fields are identical will be grouped.
collapse_mode : str {sum, mean, median, random, first}
The strategy to use for collapsing counts in the table.
Returns
-------
pd.DataFrame
Sample metadata resulting from the collapse operation.
biom.Table
The collapsed biom table.
Raises
------
KeyError
If sample_id_field or any of the collapse fields are not column headers
in mapping_f.
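Examples
--------
A hypothetical invocation (not executed here; it assumes ``table`` is a
loaded ``biom.Table`` and ``map.txt`` is a QIIME mapping file with
'Subject' and 'Timepoint' columns):
>>> collapsed_md, collapsed_table = collapse_samples(
...     table, open('map.txt', 'U'), ['Subject', 'Timepoint'],
...     'sum')  # doctest: +SKIP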
"""
collapsed_metadata = _collapse_metadata(mapping_f,
collapse_fields)
new_index_to_group, old_index_to_new_index = \
_group_by_sample_metadata(collapsed_metadata)
partition_f = partial(_sample_id_from_group_id,
sid_to_group_id=old_index_to_new_index)
collapse_fns = get_collapse_fns()
try:
collapse_f = collapse_fns[collapse_mode]
except KeyError:
raise KeyError(
"Unknown collapse function %s. Valid choices are: "
"%s." % (collapse_mode, ', '.join(collapse_fns.keys())))
output_table = table.collapse(
partition_f, collapse_f=collapse_f, norm=False, axis='sample')
return collapsed_metadata, output_table
def mapping_lines_from_collapsed_df(collapsed_df):
""" Formats a multi-index DataFrame as lines of a QIIME mapping file
Parameters
----------
collapsed_df : pd.DataFrame
Sample metadata resulting from the collapse operation.
Returns
-------
list of strings
Lines representing the text of a QIIME mapping file.
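Hypothetical usage (not executed here; ``collapsed_df`` is assumed to be the
DataFrame returned by ``collapse_samples``):
>>> lines = mapping_lines_from_collapsed_df(collapsed_df)  # doctest: +SKIP
>>> lines[0].startswith('#SampleID')  # doctest: +SKIP
True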
"""
lines = []
lines.append('\t'.join(['#SampleID', 'original-sample-ids'] +\
list(collapsed_df.columns)[1:]))
for r in collapsed_df.iterrows():
# this is a little ugly, but we need to handle single and multi-index
# values here
if isinstance(r[0], tuple):
new_idx = '.'.join(map(str, r[0]))
else:
new_idx = str(r[0])
new_values = []
for e in r[1]:
if len(set(e)) == 1:
# if all samples in the replicate group have the same
# value for this column, just store that value
new_values.append(str(e[0]))
else:
# if any samples in the replicate group differ in the value
# in this column, store all of the values in the same order
# as the ids in the new "original-sample-ids" column
new_values.append('(%s)' % ', '.join(map(str,e)))
lines.append('\t'.join([new_idx] + new_values))
return lines
def _collapse_metadata(mapping_f, collapse_fields):
""" Load a mapping file into a DataFrame and then collapse rows
Parameters
----------
mapping_f : file handle or filepath
The sample metadata mapping file.
collapse_fields : iterable
The fields to combine when collapsing samples. For each sample in the
mapping_f, the ordered values from these columns will be tuplized and
used as the group identifier. Samples whose tuplized values in these
fields are identical will be grouped.
Returns
-------
pd.DataFrame
Sample metadata resulting from the collapse operation.
Raises
------
KeyError
If sample_id_field or any of the collapse fields are not column headers
in mapping_f.
"""
mapping_data, header, _ = parse_mapping_file(mapping_f)
sample_md = pd.DataFrame(mapping_data, columns=header)
grouped = sample_md.groupby(collapse_fields)
collapsed_md = grouped.agg(lambda x: tuple(x))
return collapsed_md
def _sample_id_from_group_id(id_, md, sid_to_group_id):
try:
group_id = sid_to_group_id[id_]
except KeyError:
raise KeyError("Sample id %s doesn't map to a group id." % id_)
return '.'.join(map(str, group_id))
def _collapse_to_first(t, axis):
return np.asarray([e[0] for e in t.iter_data(axis=axis, dense=True)])
def _collapse_to_median(t, axis):
return np.asarray([np.median(e) for e in t.iter_data(axis=axis, dense=True)])
def _collapse_to_sum(t, axis):
return np.asarray([np.sum(e) for e in t.iter_data(axis=axis)])
def _collapse_to_mean(t, axis):
return np.asarray([np.mean(e) for e in t.iter_data(axis=axis)])
def _collapse_to_random(t, axis):
if axis == 'sample':
length = t.length("observation")
elif axis == 'observation':
length = t.length("sample")
else:
raise UnknownAxisError(axis)
n = np.random.randint(length)
return np.asarray([e[n] for e in t.iter_data(axis=axis, dense=True)])
def _validate_input(dist_matrix_header, dist_matrix, mapping_header, mapping,
field):
"""Validates the input data to make sure it can be used and makes sense.
The headers, distance matrix, and mapping input should be iterable, and all
data should not be None. The field must exist in the mapping header.
"""
if (dist_matrix_header is None or dist_matrix is None or mapping_header is
None or mapping is None or field is None):
raise ValueError("The input(s) cannot be 'None'.")
# Make sure the appropriate input is iterable.
for input_arg in (dist_matrix_header, dist_matrix, mapping_header,
mapping):
try:
iter(input_arg)
except:
raise ValueError("The headers, distance matrix, and mapping data "
"must be iterable.")
# The field must be a string.
if not isinstance(field, str):
raise ValueError("The field must be a string.")
# Make sure the field is in the mapping header.
if field not in mapping_header:
raise ValueError("The field '%s' is not in the mapping file header."
% field)
# check that we share sample identifiers between the mapping file and the distance matrix
if not set(zip(*mapping)[0]) & set(dist_matrix_header):
raise ValueError('The mapping file does not share at least one sample'
' with the distance matrix.')
def _get_indices(input_items, wanted_items):
"""Returns indices of the wanted items in the input items if present.
input_items must be iterable, and wanted_items may be either a single value
or a list. The return value will always be a list of indices, and an empty
list if none were found. If wanted_items is a single string, it is treated
as a scalar, not an iterable.
"""
# Note: Some of this code is taken from Jeremy Widmann's
# get_valid_indices() function, part of make_distance_histograms.py from QIIME 1.8.0.
try:
iter(input_items)
except:
raise ValueError("The input_items to search must be iterable.")
try:
len(wanted_items)
except:
# We have a scalar value, so put it in a list.
wanted_items = [wanted_items]
if isinstance(wanted_items, basestring):
wanted_items = [wanted_items]
return [input_items.index(item)
for item in wanted_items if item in input_items]
def _get_groupings(dist_matrix_header, dist_matrix, groups, within=True,
suppress_symmetry_and_hollowness_check=False):
"""Returns a list of distance groupings.
The return value is a list that contains tuples of three elements: the
first two elements are the field values being compared, and the third
element is a list of the distances.
WARNING: Only symmetric, hollow distance matrices may be used as input.
Asymmetric distance matrices, such as those obtained by the UniFrac Gain
metric (i.e. beta_diversity.py -m unifrac_g), should not be used as input.
Arguments:
- dist_matrix_header: The distance matrix header.
- dist_matrix: The distance matrix.
- groups: A dictionary mapping field value to sample IDs, obtained by
calling group_by_field().
- within: If True, distances are grouped within a field value. If
False, distances are grouped between field values.
- suppress_symmetry_and_hollowness_check: By default, the input
distance matrix will be checked for symmetry and hollowness. It is
recommended to leave this check in place for safety, as the check
is fairly fast. However, if you *know* you have a symmetric and
hollow distance matrix, you can disable this check for small
performance gains on extremely large distance matrices
If within is True, the zeros along the diagonal of the distance matrix are
omitted.
"""
# Note: Much of this code is taken from Jeremy Widmann's
# distances_by_groups() function, part of make_distance_histograms.py from QIIME 1.8.0.
if not suppress_symmetry_and_hollowness_check:
if not is_symmetric_and_hollow(dist_matrix):
raise ValueError("The distance matrix must be symmetric and "
"hollow.")
result = []
group_items = groups.items()
for i, (row_group, row_ids) in enumerate(group_items):
row_indices = _get_indices(dist_matrix_header, row_ids)
if within:
# Handle the case where indices are the same so we need to omit
# the diagonal.
block = dist_matrix[row_indices][:, row_indices]
size = len(row_indices)
indices = []
for i in range(size):
for j in range(i, size):
if i != j:
indices.append(block[i][j])
if indices:
result.append((row_group, row_group, indices))
else:
# Handle the case where indices are separate: just return blocks.
for j in range(i + 1, len(groups)):
col_group, col_ids = group_items[j]
col_indices = _get_indices(dist_matrix_header, col_ids)
vals = dist_matrix[row_indices][:, col_indices]
# Flatten the array into a single-level list.
vals = list(vals.flat)
if vals:
result.append((row_group, col_group, vals))
return result
def extract_per_individual_states_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
filter_missing_data=True):
"""
sample_metadata : 2d dictionary mapping sample ids to metadata (as
returned from qiime.parse.parse_mapping_file_to_dict)
state_category: metadata category name describing state of interest
(usually something like 'TreatmentState') as a string
state_values: ordered list of values of interest in the state_category
metadata entry (usually something like ['PreTreatment','PostTreatment'])
individual_identifier_category: metadata category name describing the
individual (usually something like 'PersonalID') as a string
filter_missing_data: if True, an individual is excluded
from the result object if any of its values are None. This can occur
when there is no sample for one or more of the state values for an
individual. This is True by default.
returns {'individual-identifier':
[sample-id-at-state-value1,
sample-id-at-state-value2,
sample-id-at-state-value3,
...],
...
}
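A minimal sketch with hypothetical metadata (the category names are
assumptions, not requirements; 'p2' is dropped because it has no sample for
the 'Post' state):
>>> md = {'a1': {'PersonalID': 'p1', 'TreatmentState': 'Pre'},
...       'a2': {'PersonalID': 'p1', 'TreatmentState': 'Post'},
...       'b1': {'PersonalID': 'p2', 'TreatmentState': 'Pre'}}
>>> result = extract_per_individual_states_from_sample_metadata(
...     md, 'TreatmentState', ['Pre', 'Post'], 'PersonalID')
>>> dict(result)
{'p1': ['a1', 'a2']}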
"""
# prep the result object, which will be a dict of lists
len_state_values = len(state_values)
def inner_dict_constructor():
return [None] * len_state_values
results = defaultdict(inner_dict_constructor)
for sample_id, metadata in sample_metadata.items():
try:
individual_id = metadata[individual_identifier_category]
except KeyError:
raise KeyError("%s is not a sample metadata category." %
individual_identifier_category)
try:
state_value = metadata[state_category]
except KeyError:
raise KeyError("%s is not a sample metadata category." %
state_category)
try:
state_index = state_values.index(state_value)
except ValueError:
# hit a state that is in the mapping file but not in
# state_values - this is silently ignored
continue
results[individual_id][state_index] = sample_id
if filter_missing_data:
# delete individual results if sample ids corresponding to
# any of the states are missing
for individual_id, sample_ids in results.items():
if None in sample_ids:
del results[individual_id]
return results
def extract_per_individual_state_metadatum_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
metadata_category,
process_f=float):
"""
sample_metadata : 2d dictionary mapping sample ids to metadata (as
returned from qiime.parse.parse_mapping_file_to_dict)
state_category: metadata category name describing state of interest
(usually something like 'TreatmentState') as a string
state_values: ordered list of values of interest in the state_category
metadata entry (usually something like ['PreTreatment','PostTreatment'])
individual_identifier_category: metadata category name describing the
individual (usually something like 'PersonalID') as a string
metadata_category: metadata category to extract from sample_metadata
process_f: function to apply to metadata values (default: float)
returns {'individual-identifier':
[state-1-metadata-value,
state-2-metadata-value,
...],
...
}
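A minimal sketch with hypothetical metadata ('Weight' is an assumed
category; its string values are converted with process_f):
>>> md = {'a1': {'PersonalID': 'p1', 'TreatmentState': 'Pre', 'Weight': '70'},
...       'a2': {'PersonalID': 'p1', 'TreatmentState': 'Post', 'Weight': '68'}}
>>> extract_per_individual_state_metadatum_from_sample_metadata(
...     md, 'TreatmentState', ['Pre', 'Post'], 'PersonalID', 'Weight')
{'p1': [70.0, 68.0]}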
"""
per_individual_states = extract_per_individual_states_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
filter_missing_data=True)
results = {}
for individual_id, sample_ids in per_individual_states.items():
per_state_metadata_values = []
for sample_id in sample_ids:
try:
sample_metadata_value = sample_metadata[
sample_id][
metadata_category]
except KeyError:
raise KeyError(
"%s is not a sample metadata category." %
metadata_category)
try:
v = process_f(sample_metadata_value)
except ValueError as e:
v = None
per_state_metadata_values.append(v)
results[individual_id] = per_state_metadata_values
return results
def extract_per_individual_state_metadata_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
metadata_categories,
process_f=float):
"""
sample_metadata : 2d dictionary mapping sample ids to metadata (as
returned from qiime.parse.parse_mapping_file_to_dict)
state_category: metadata category name describing state of interest
(usually something like 'TreatmentState') as a string
state_values: ordered list of values of interest in the state_category
metadata entry (usually something like ['PreTreatment','PostTreatment'])
individual_identifier_category: metadata category name describing the
individual (usually something like 'PersonalID') as a string
metadata_categories: metadata categories to extract from sample_metadata
process_f: function to apply to metadata values (default: float)
returns {'metadata-category-1':
{'individual-identifier-1':
[state-1-metadata-value,
state-2-metadata-value,
...],
'individual-identifier-2':
[state-1-metadata-value,
state-2-metadata-value,
...],
}
...
}
"""
results = {}
for metadata_category in metadata_categories:
results[metadata_category] = \
extract_per_individual_state_metadatum_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
metadata_category,
process_f)
return results
def extract_per_individual_state_metadata_from_sample_metadata_and_biom(
sample_metadata,
biom_table,
state_category,
state_values,
individual_identifier_category,
observation_ids=None):
"""
sample_metadata : 2d dictionary mapping sample ids to metadata (as
returned from qiime.parse.parse_mapping_file_to_dict)
biom_table: biom table object containing observation counts for
samples in sample_metadata
state_category: metadata category name describing state of interest
(usually something like 'TreatmentState') as a string
state_values: ordered list of values of interest in the state_category
metadata entry (usually something like ['PreTreatment','PostTreatment'])
individual_identifier_category: metadata category name describing the
individual (usually something like 'PersonalID') as a string
observation_ids: observations (usually OTUs) to extract from biom_table
(default is all)
returns {'otu1':
{'individual-identifier-1':
[state-1-otu1-abundance,
state-2-otu1-abundance,
...],
'individual-identifier-2':
[state-1-otu1-abundance,
state-2-otu1-abundance,
...],
}
...
}
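Hypothetical usage (not executed here; ``bt`` is assumed to be a biom.Table
whose sample ids match the keys of ``md``):
>>> extract_per_individual_state_metadata_from_sample_metadata_and_biom(
...     md, bt, 'TreatmentState', ['Pre', 'Post'],
...     'PersonalID')  # doctest: +SKIP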
"""
per_individual_states = extract_per_individual_states_from_sample_metadata(
sample_metadata,
state_category,
state_values,
individual_identifier_category,
filter_missing_data=True)
results = {}
if observation_ids is None:
observation_ids = biom_table.ids(axis='observation')
for observation_id in observation_ids:
observation_data = biom_table.data(observation_id, 'observation')
results[observation_id] = {}
for individual_id, sample_ids in per_individual_states.items():
per_state_metadata_values = []
for sample_id in sample_ids:
sample_index = biom_table.index(sample_id, 'sample')
per_state_metadata_values.append(
observation_data[sample_index])
results[observation_id][individual_id] = per_state_metadata_values
return results
| gpl-2.0 |