repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 1 | 10713 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(
expected_values, col, actual_dict[col]))
def _make_test_csv():
f = tempfile.NamedTemporaryFile(delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse():
f = tempfile.NamedTemporaryFile(delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand())
if np.random.rand() > 0.5 else "")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord():
f = tempfile.NamedTemporaryFile(delete=False)
w = tf.python_io.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
class TensorFlowDataFrameTestCase(tf.test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df,
batch_size=10,
shuffle=False)
batch = tensorflow_df.run_once()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [8, 4, 7]
num_batches = 10
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
num_epochs=num_epochs,
shuffle=False,
default_values=default_values)
actual_num_batches = len(list(tensorflow_df.run()))
self.assertEqual(expected_num_batches, actual_num_batches)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = _make_test_csv_sparse()
feature_spec = {
"int": tf.FixedLenFeature(None, dtypes.int16, np.nan),
"float": tf.VarLenFeature(dtypes.float16),
"bool": tf.VarLenFeature(dtypes.bool),
"string": tf.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
s[i] = ""
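# A vectorized alternative (a sketch, not part of the original test: it relies
# only on pandas' fillna, which matches NaN cells directly without an equality
# check):
#   pandas_df["string"] = pandas_df["string"].fillna("")
# This is equivalent to the loop above, assuming the column keeps the object
# dtype requested in read_csv.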
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = _make_test_tfrecord()
features = {
"fixed_len_float": tf.FixedLenFeature(shape=[2],
dtype=tf.float32,
default_value=[0.0, 0.0]),
"var_len_int": tf.VarLenFeature(dtype=tf.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# The generated TFRecord file contains 100 records with two features:
# var_len_int and fixed_len_float. Entry n contains `range(n % 3)` for
# var_len_int and `[float(n), 2 * float(n)]` for fixed_len_float.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mayoub/hhana | mva/classify.py | 4 | 25742 | # stdlib imports
import os
import pickle
from operator import itemgetter
import types
import shutil
from cStringIO import StringIO
# numpy imports
import numpy as np
# scikit-learn imports
import sklearn
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier, export_graphviz
# rootpy imports
from rootpy.extern.tabulartext import PrettyTable
# root_numpy imports
from root_numpy import rec2array, fill_hist
# local imports
from . import log; log = log[__name__]
from . import MMC_MASS, MMC_PT
from .plotting import plot_grid_scores
from . import variables, CACHE_DIR, BDT_DIR
from .systematics import systematic_name
from .grid_search import BoostGridSearchCV
def print_feature_ranking(clf, fields):
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
log.info("Feature ranking:")
out = StringIO()
print >> out
print >> out
print >> out, r"\begin{tabular}{c|c|c}"
table = PrettyTable(["Rank", "Variable", "Importance"])
print >> out, r"\hline\hline"
print >> out, r"Rank & Variable & Importance\\"
for f, idx in enumerate(indices):
table.add_row([f + 1,
fields[idx],
'%.3f' % importances[idx]])
print >> out, r"%d & %s & %.3f\\" % (f + 1,
variables.VARIABLES[fields[idx]]['title'],
importances[idx])
print >> out, r"\end{tabular}"
print >> out
print >> out, table.get_string(hrules=1)
log.info(out.getvalue())
def histogram_scores(hist_template, scores,
min_score=None, max_score=None,
inplace=False):
if not inplace:
hist = hist_template.Clone(name=hist_template.name + "_scores")
hist.Reset()
else:
hist = hist_template
if min_score is not None:
log.info("cutting out scores below %f" % min_score)
if max_score is not None:
log.info("cutting out scores above %f" % max_score)
if isinstance(scores, np.ndarray):
if min_score is not None:
scores = scores[scores > min_score]
if max_score is not None:
scores = scores[scores < max_score]
fill_hist(hist, scores)
elif isinstance(scores, tuple):
# data
scores, weight = scores
if min_score is not None:
scores_idx = scores > min_score
scores = scores[scores_idx]
weight = weight[scores_idx]
if max_score is not None:
scores_idx = scores < max_score
scores = scores[scores_idx]
weight = weight[scores_idx]
assert (weight == 1).all()
fill_hist(hist, scores)
elif isinstance(scores, dict):
# non-data with possible systematics
# nominal case:
nom_scores, nom_weight = scores['NOMINAL']
if min_score is not None:
scores_idx = nom_scores > min_score
nom_scores = nom_scores[scores_idx]
nom_weight = nom_weight[scores_idx]
if max_score is not None:
scores_idx = nom_scores < max_score
nom_scores = nom_scores[scores_idx]
nom_weight = nom_weight[scores_idx]
fill_hist(hist, nom_scores, nom_weight)
# systematics
sys_hists = {}
for sys_term, (sys_scores, sys_weight) in scores.items():
if sys_term == 'NOMINAL':
continue
if min_score is not None:
scores_idx = sys_scores > min_score
sys_scores = sys_scores[scores_idx]
sys_weight = sys_weight[scores_idx]
if max_score is not None:
scores_idx = sys_scores < max_score
sys_scores = sys_scores[scores_idx]
sys_weight = sys_weight[scores_idx]
sys_hist = hist.Clone(
name=hist.name + "_" + systematic_name(sys_term))
sys_hist.Reset()
fill_hist(sys_hist, sys_scores, sys_weight)
sys_hists[sys_term] = sys_hist
hist.systematics = sys_hists
else:
raise TypeError("scores not an np.array, tuple or dict")
return hist
def write_score_hists(f, mass, scores_list, hist_template, no_neg_bins=True):
sys_hists = {}
for samp, scores_dict in scores_list:
for sys_term, (scores, weights) in scores_dict.items():
if sys_term == 'NOMINAL':
suffix = ''
else:
suffix = '_' + '_'.join(sys_term)
hist = hist_template.Clone(
name=samp.name + ('_{0}'.format(mass)) + suffix)
fill_hist(hist, scores, weights)
if sys_term not in sys_hists:
sys_hists[sys_term] = []
sys_hists[sys_term].append(hist)
f.cd()
for sys_term, hists in sys_hists.items():
bad_bins = []
if no_neg_bins:
# check for negative bins over all systematics and zero them out
# negative bins cause lots of problem in the limit setting
# negative bin contents effectively means
# the same as "no events here..."
total_hist = sum(hists)
for bin, content in enumerate(total_hist):
if content < 0:
log.warning("Found negative bin %d (%f) for "
"systematic %s" % (
bin, content, sys_term))
bad_bins.append(bin)
for hist in hists:
for bin in bad_bins:
# zero out bad bins
hist[bin] = 0.
hist.Write()
def make_dataset(signals, backgrounds,
category, region, fields,
cuts=None):
signal_arrs = []
signal_weight_arrs = []
background_arrs = []
background_weight_arrs = []
for signal in signals:
rec = signal.merged_records(
category=category,
region=region,
fields=fields,
cuts=cuts)
signal_weight_arrs.append(rec['weight'])
signal_arrs.append(rec2array(rec, fields))
for background in backgrounds:
rec = background.merged_records(
category=category,
region=region,
fields=fields,
cuts=cuts)
background_weight_arrs.append(rec['weight'])
background_arrs.append(rec2array(rec, fields))
signal_array = np.concatenate(signal_arrs)
signal_weight_array = np.concatenate(signal_weight_arrs)
background_array = np.concatenate(background_arrs)
background_weight_array = np.concatenate(background_weight_arrs)
return (signal_array, signal_weight_array,
background_array, background_weight_array)
def make_partitioned_dataset(signals, backgrounds,
category, region, fields,
partition_key,
cuts=None):
signal_arrs = []
signal_weight_arrs = []
background_arrs = []
background_weight_arrs = []
for signal in signals:
left, right = signal.partitioned_records(
category=category,
region=region,
fields=fields,
cuts=cuts,
key=partition_key)
signal_weight_arrs.append(
(left['weight'], right['weight']))
signal_arrs.append(
(rec2array(left, fields),
rec2array(right, fields)))
for background in backgrounds:
left, right = background.partitioned_records(
category=category,
region=region,
fields=fields,
cuts=cuts,
key=partition_key)
background_weight_arrs.append(
(left['weight'], right['weight']))
background_arrs.append(
(rec2array(left, fields),
rec2array(right, fields)))
return (signal_arrs, signal_weight_arrs,
background_arrs, background_weight_arrs)
def get_partition(s, sw, b, bw, partition_idx):
# select partition and merge arrays
s = np.concatenate(map(itemgetter(partition_idx), s))
sw = np.concatenate(map(itemgetter(partition_idx), sw))
b = np.concatenate(map(itemgetter(partition_idx), b))
bw = np.concatenate(map(itemgetter(partition_idx), bw))
return s, sw, b, bw
def prepare_dataset(signal_train, signal_weight_train,
background_train, background_weight_train,
max_sig=None,
max_bkg=None,
norm_sig_to_bkg=True,
same_size_sig_bkg=True,
remove_negative_weights=False):
if remove_negative_weights:
# remove samples from the training sample with a negative weight
signal_train = signal_train[signal_weight_train >= 0]
background_train = background_train[background_weight_train >= 0]
signal_weight_train = signal_weight_train[signal_weight_train >= 0]
background_weight_train = background_weight_train[background_weight_train >= 0]
log.info("removing events with negative weights")
if max_sig is not None and max_sig < len(signal_train):
subsample = np.random.permutation(len(signal_train))[:max_sig]
signal_train = signal_train[subsample]
signal_weight_train = signal_weight_train[subsample]
log.info("signal stats reduced to user-specified maximum")
if max_bkg is not None and max_bkg < len(background_train):
subsample = np.random.permutation(len(background_train))[:max_bkg]
background_train = background_train[subsample]
background_weight_train = background_weight_train[subsample]
log.info("background stats reduced to user-specified maximum")
if same_size_sig_bkg:
if len(background_train) > len(signal_train):
# random subsample of background so it's the same size as signal
subsample = np.random.permutation(
len(background_train))[:len(signal_train)]
background_train = background_train[subsample]
background_weight_train = background_weight_train[subsample]
log.info("number of background events reduced "
"to match number of signal events")
elif len(background_train) < len(signal_train):
# random subsample of signal so it's the same size as background
subsample = np.random.permutation(
len(signal_train))[:len(background_train)]
signal_train = signal_train[subsample]
signal_weight_train = signal_weight_train[subsample]
log.info("number of signal events reduced "
"to match number of background events")
if norm_sig_to_bkg:
# normalize signal to background
signal_weight_train *= (
background_weight_train.sum() / signal_weight_train.sum())
log.info("normalizing signal to match background")
log.info("training Samples:")
log.info("signal: %d events, %s features" % signal_train.shape)
log.info("sum(signal weights): %f" % signal_weight_train.sum())
log.info("background: %d events, %s features" % background_train.shape)
log.info("sum(background weights): %f" % background_weight_train.sum())
log.info("total: %d events" % (
signal_train.shape[0] +
background_train.shape[0]))
sample_train = np.concatenate((background_train, signal_train))
sample_weight_train = np.concatenate(
(background_weight_train, signal_weight_train))
labels_train = np.concatenate(
(np.zeros(len(background_train)), np.ones(len(signal_train))))
# random permutation of training sample
perm = np.random.permutation(len(labels_train))
sample_train = sample_train[perm]
sample_weight_train = sample_weight_train[perm]
labels_train = labels_train[perm]
return sample_train, labels_train, sample_weight_train
class Classifier(object):
# minimal list of spectators
SPECTATORS = [
MMC_PT,
MMC_MASS,
]
def __init__(self,
mass,
fields,
category,
region,
cuts=None,
spectators=None,
output_suffix="",
clf_output_suffix="",
partition_key='EventNumber',
transform=True,
mmc=True):
fields = fields[:]
if not mmc:
try:
fields.remove(MMC_MASS)
except ValueError:
pass
self.mass = mass
self.fields = fields
self.category = category
self.region = region
self.spectators = spectators
self.output_suffix = output_suffix
self.clf_output_suffix = clf_output_suffix
self.partition_key = partition_key
self.transform = transform
self.mmc = mmc
self.background_label = 0
self.signal_label = 1
if spectators is None:
spectators = []
# merge in minimal list of spectators
for spec in Classifier.SPECTATORS:
if spec not in spectators and spec not in fields:
spectators.append(spec)
self.all_fields = fields + spectators
assert 'weight' not in fields
# classifiers for the left and right partitions
# each trained on the opposite partition
self.clfs = None
def binning(self, year, overflow=None):
# get the binning (see the optimize-binning script)
with open(os.path.join(CACHE_DIR, 'binning/binning_{0}_{1}_{2}.pickle'.format(
self.category.name, self.mass, year % 1000))) as f:
binning = pickle.load(f)
if overflow is not None:
binning[0] -= overflow
binning[-1] += overflow
return binning
def load(self, swap=False):
"""
If swap is True then use the internal classifiers on the "wrong"
partitions. This is used when demonstrating stability in data. The
shape of the data distribution should be the same for both classifiers.
"""
use_cache = True
# attempt to load existing classifiers
clfs = [None, None]
for partition_idx in range(2):
category_name = self.category.get_parent().name
clf_filename = os.path.join(BDT_DIR,
'clf_{0}_{1}{2}_{3}.pickle'.format(
category_name, self.mass,
self.clf_output_suffix, partition_idx))
log.info("attempting to open %s ..." % clf_filename)
if os.path.isfile(clf_filename):
# use a previously trained classifier
log.info("found existing classifier in %s" % clf_filename)
with open(clf_filename, 'r') as f:
clf = pickle.load(f)
out = StringIO()
print >> out
print >> out
print >> out, clf
log.info(out.getvalue())
print_feature_ranking(clf, self.fields)
if swap:
# DANGER
log.warning("will apply classifiers on swapped partitions")
clfs[partition_idx] = clf
else:
clfs[(partition_idx + 1) % 2] = clf
else:
log.warning("could not open %s" % clf_filename)
use_cache = False
break
if use_cache:
self.clfs = clfs
log.info("using previously trained classifiers")
return True
else:
log.warning(
"unable to load previously trained "
"classifiers; train new ones")
return False
def train(self,
signals,
backgrounds,
cuts=None,
max_sig=None,
max_bkg=None,
norm_sig_to_bkg=True,
same_size_sig_bkg=False,
remove_negative_weights=False,
max_trees=200,
min_trees=1,
learning_rate=0.1,
max_fraction=0.3,
min_fraction=0.001,
min_fraction_steps=200,
cv_nfold=10,
n_jobs=-1,
dry_run=False):
"""
Determine best BDTs on left and right partitions. Each BDT will then be
used on the other partition.
"""
signal_arrs, signal_weight_arrs, \
background_arrs, background_weight_arrs = make_partitioned_dataset(
signals, backgrounds,
category=self.category,
region=self.region,
fields=self.fields,
cuts=cuts,
partition_key=self.partition_key)
if not dry_run:
self.clfs = [None, None]
for partition_idx in range(2):
clf_filename = os.path.join(BDT_DIR,
'clf_{0}_{1}{2}_{3}'.format(
self.category.name, self.mass,
self.clf_output_suffix, partition_idx))
signal_train, signal_weight_train, \
background_train, background_weight_train = get_partition(
signal_arrs, signal_weight_arrs,
background_arrs, background_weight_arrs,
partition_idx)
sample_train, labels_train, sample_weight_train = prepare_dataset(
signal_train, signal_weight_train,
background_train, background_weight_train,
max_sig=max_sig,
max_bkg=max_bkg,
norm_sig_to_bkg=norm_sig_to_bkg,
same_size_sig_bkg=same_size_sig_bkg,
remove_negative_weights=remove_negative_weights)
if dry_run:
return
log.info("training a new classifier...")
if partition_idx == 0:
# grid search params
# min_samples_leaf
#min_leaf_high = int((sample_train.shape[0] / 8) *
# (cv_nfold - 1.) / cv_nfold)
#min_leaf_low = max(10, int(min_leaf_high / 100.))
#min_leaf_step = max((min_leaf_high - min_leaf_low) / 100, 1)
#min_samples_leaf = range(
# min_leaf_low, min_leaf_high, min_leaf_step)
# min_fraction_leaf
min_fraction_leaf = np.linspace(
min_fraction, max_fraction, min_fraction_steps)
grid_params = {
#'base_estimator__min_samples_leaf': min_samples_leaf,
'base_estimator__min_fraction_leaf': min_fraction_leaf,
}
# create a BDT
clf = AdaBoostClassifier(
DecisionTreeClassifier(),
learning_rate=learning_rate,
algorithm='SAMME.R',
random_state=0)
# more efficient grid-search for boosting
grid_clf = BoostGridSearchCV(
clf, grid_params,
max_n_estimators=max_trees,
min_n_estimators=min_trees,
#score_func=accuracy_score,
score_func=roc_auc_score, # area under the ROC curve
cv=StratifiedKFold(labels_train, cv_nfold),
n_jobs=n_jobs)
#grid_clf = GridSearchCV(
# clf, grid_params,
# score_func=accuracy_score,
# cv = StratifiedKFold(labels_train, cv_nfold),
# n_jobs=n_jobs)
log.info("")
log.info("using a %d-fold cross validation" % cv_nfold)
log.info("performing a grid search over these parameter values:")
for param, values in grid_params.items():
log.info('{0} {1}'.format(param.split('__')[-1], values))
log.info("Minimum number of trees: %d" % min_trees)
log.info("Maximum number of trees: %d" % max_trees)
log.info("")
log.info("training new classifiers ...")
# perform the cross-validated grid-search
grid_clf.fit(
sample_train, labels_train,
sample_weight=sample_weight_train)
clf = grid_clf.best_estimator_
grid_scores = grid_clf.grid_scores_
log.info("Best score: %f" % grid_clf.best_score_)
log.info("Best Parameters:")
log.info(grid_clf.best_params_)
# plot a grid of the scores
plot_grid_scores(
grid_scores,
best_point={
'base_estimator__min_fraction_leaf':
clf.base_estimator.min_fraction_leaf,
'n_estimators':
clf.n_estimators},
params={
'base_estimator__min_fraction_leaf':
'leaf fraction',
'n_estimators':
'trees'},
name=(self.category.name +
("_{0}".format(self.mass)) +
self.output_suffix +
("_{0}".format(partition_idx))))
# save grid scores
with open('{0}_grid_scores.pickle'.format(clf_filename), 'w') as f:
pickle.dump(grid_scores, f)
# scale up the min-leaf and retrain on the whole set
#min_samples_leaf = clf.base_estimator.min_samples_leaf
#clf = sklearn.clone(clf)
#clf.base_estimator.min_samples_leaf = int(
# min_samples_leaf *
# cv_nfold / float(cv_nfold - 1))
#clf.fit(sample_train, labels_train,
# sample_weight=sample_weight_train)
#log.info("After scaling up min_leaf")
#out = StringIO()
#print >> out
#print >> out
#print >> out, clf
#log.info(out.getvalue())
else: # training on the other partition
log.info("training a new classifier ...")
# use same params as in first partition
clf = sklearn.clone(clf)
out = StringIO()
print >> out
print >> out
print >> out, clf
log.info(out.getvalue())
clf.fit(sample_train, labels_train,
sample_weight=sample_weight_train)
# export to graphviz dot format
if os.path.isdir(clf_filename):
shutil.rmtree(clf_filename)
os.mkdir(clf_filename)
for itree, tree in enumerate(clf):
export_graphviz(
tree,
out_file=os.path.join(
clf_filename,
'tree_{0:04d}.dot'.format(itree)),
feature_names=self.all_fields)
with open('{0}.pickle'.format(clf_filename), 'w') as f:
pickle.dump(clf, f)
print_feature_ranking(clf, self.fields)
self.clfs[(partition_idx + 1) % 2] = clf
def classify(self, sample, category, region,
cuts=None, systematic='NOMINAL'):
if self.clfs is None:
raise RuntimeError("you must train the classifiers first")
partitions = sample.partitioned_records(
category=category,
region=region,
fields=self.fields,
cuts=cuts,
systematic=systematic,
num_partitions=2,
return_idx=True,
key=self.partition_key)
score_idx = [[], []]
for i, partition in enumerate(partitions):
for rec, idx in partition:
weight = rec['weight']
arr = rec2array(rec, self.fields)
# each classifier is never used on the partition that trained it
scores = self.clfs[i].decision_function(arr)
score_idx[i].append((idx, scores, weight))
# must preserve order of scores wrt the other fields!
# merge the scores and weights according to the idx
merged_scores = []
merged_weight = []
for left, right in zip(*score_idx):
left_idx, left_scores, left_weight = left
right_idx, right_scores, right_weight = right
insert_idx = np.searchsorted(left_idx, right_idx)
scores = np.insert(left_scores, insert_idx, right_scores)
weight = np.insert(left_weight, insert_idx, right_weight)
merged_scores.append(scores)
merged_weight.append(weight)
scores = np.concatenate(merged_scores)
weight = np.concatenate(merged_weight)
if self.transform:
log.info("classifier scores are transformed")
if isinstance(self.transform, types.FunctionType):
# user-defined transformation
scores = self.transform(scores)
else:
# logistic tranformation used by TMVA (MethodBDT.cxx)
scores = -1 + 2.0 / (1.0 +
np.exp(-self.clfs[0].n_estimators *
self.clfs[0].learning_rate * scores / 1.5))
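# The transform above is the TMVA-style logistic squashing
#   t(s) = -1 + 2 / (1 + exp(-n_estimators * learning_rate * s / 1.5)),
# a monotonically increasing map of the raw boosted-decision score s into the
# open interval (-1, 1), with t(0) = 0.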
return scores, weight
| gpl-3.0 |
go-bears/nupic | examples/opf/tools/MirrorImageViz/mirrorImageViz.py | 50 | 7221 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
# Author: Surabhi Gupta
import sys
import numpy as np
import matplotlib.pylab as pyl
def analyzeOverlaps(activeCoincsFile, encodingsFile, dataset):
'''Mirror Image Visualization: Shows the encoding space juxtaposed against the
coincidence space. The encoding space is the bottom-up sensory encoding and
the coincidence space depicts the corresponding activation of coincidences in
the SP. Hence, the mirror image visualization is a visual depiction of the
mapping of SP cells to the input representations.
Note:
* The files spBUOut and sensorBUOut are assumed to be in the output format
used for LPF experiment outputs.
* BU outputs for some sample datasets are provided. Specify the name of the
dataset as an option while running this script.
'''
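# Input format assumed by the parsing below (inferred from this code, not from
# external documentation): every line of both files starts with the size of
# the corresponding space, followed by the space-separated indices of the
# active bits/cells for that record.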
lines = activeCoincsFile.readlines()
inputs = encodingsFile.readlines()
w = len(inputs[0].split(' '))-1
patterns = set([])
encodings = set([])
coincs = [] #The set of all coincidences that have won at least once
reUsedCoincs = []
firstLine = inputs[0].split(' ')
size = int(firstLine.pop(0))
spOutput = np.zeros((len(lines),40))
inputBits = np.zeros((len(lines),w))
print 'Total n:', size
print 'Total number of records in the file:', len(lines), '\n'
print 'w:', w
count = 0
for x in xrange(len(lines)):
inputSpace = [] #Encoded representation for each input
spBUout = [int(z) for z in lines[x].split(' ')]
spBUout.pop(0) #The first element of each row of spBUOut is the size of the SP
temp = set(spBUout)
spOutput[x]=spBUout
input = [int(z) for z in inputs[x].split(' ')]
input.pop(0) #The first element of each row of sensorBUout is the size of the encoding space
tempInput = set(input)
inputBits[x]=input
#Creating the encoding space
for m in xrange(size):
if m in tempInput:
inputSpace.append(m)
else:
inputSpace.append('|') #A non-active bit
repeatedBits = tempInput.intersection(encodings) #Storing the bits that have been previously active
reUsed = temp.intersection(patterns) #Checking if any of the active cells have been previously active
#Dividing the coincidences into two difference categories.
if len(reUsed)==0:
coincs.append((count,temp,repeatedBits,inputSpace, tempInput)) #Pattern no, active cells, repeated bits, encoding (full), encoding (summary)
else:
reUsedCoincs.append((count,temp,repeatedBits,inputSpace, tempInput))
patterns=patterns.union(temp) #Adding the active cells to the set of coincs that have been active at least once
encodings = encodings.union(tempInput)
count +=1
overlap = {}
overlapVal = 0
seen = []
seen = (printOverlaps(coincs, coincs, seen))
print len(seen), 'sets of 40 cells'
seen = printOverlaps(reUsedCoincs, coincs, seen)
Summ=[]
for z in coincs:
c=0
for y in reUsedCoincs:
c += len(z[1].intersection(y[1]))
Summ.append(c)
print 'Sum: ', Summ
for m in xrange(3):
displayLimit = min(51, len(spOutput[m*200:]))
if displayLimit>0:
drawFile(dataset, np.zeros([len(inputBits[:(m+1)*displayLimit]),len(inputBits[:(m+1)*displayLimit])]), inputBits[:(m+1)*displayLimit], spOutput[:(m+1)*displayLimit], w, m+1)
else:
print 'No more records to display'
pyl.show()
def drawFile(dataset, matrix, patterns, cells, w, fnum):
'''The similarity of two patterns in the bit-encoding space is displayed alongside
their similarity in the sp-coinc space.'''
score=0
count = 0
assert len(patterns)==len(cells)
for p in xrange(len(patterns)-1):
matrix[p+1:,p] = [len(set(patterns[p]).intersection(set(q)))*100/w for q in patterns[p+1:]]
matrix[p,p+1:] = [len(set(cells[p]).intersection(set(r)))*5/2 for r in cells[p+1:]]
score += sum(abs(np.array(matrix[p+1:,p])-np.array(matrix[p,p+1:])))
count += len(matrix[p+1:,p])
print 'Score', score/count
fig = pyl.figure(figsize = (10,10), num = fnum)
pyl.matshow(matrix, fignum = fnum)
pyl.colorbar()
pyl.title('Coincidence Space', verticalalignment='top', fontsize=12)
pyl.xlabel('The Mirror Image Visualization for '+dataset, fontsize=17)
pyl.ylabel('Encoding space', fontsize=12)
def printOverlaps(comparedTo, coincs, seen):
""" Compare the results and return True if success, False if failure
Parameters:
--------------------------------------------------------------------
coincs: Which cells are we comparing?
comparedTo: The set of 40 cells we being compared to (they have no overlap with seen)
seen: Which of the cells we are comparing to have already been encountered.
This helps glue together the unique and reused coincs
"""
inputOverlap = 0
cellOverlap = 0
for y in comparedTo:
closestInputs = []
closestCells = []
if len(seen)>0:
inputOverlap = max([len(seen[m][1].intersection(y[4])) for m in xrange(len(seen))])
cellOverlap = max([len(seen[m][0].intersection(y[1])) for m in xrange(len(seen))])
for m in xrange( len(seen) ):
if len(seen[m][1].intersection(y[4]))==inputOverlap:
closestInputs.append(seen[m][2])
if len(seen[m][0].intersection(y[1]))==cellOverlap:
closestCells.append(seen[m][2])
seen.append((y[1], y[4], y[0]))
print 'Pattern',y[0]+1,':',' '.join(str(len(z[1].intersection(y[1]))).rjust(2) for z in coincs),'input overlap:', inputOverlap, ';', len(closestInputs), 'closest encodings:',','.join(str(m+1) for m in closestInputs).ljust(15), \
'cell overlap:', cellOverlap, ';', len(closestCells), 'closest set(s):',','.join(str(m+1) for m in closestCells)
return seen
if __name__=='__main__':
if len(sys.argv)<2: # a dataset must be specified on the command line
print ('Input files required. Read documentation for details.')
else:
dataset = sys.argv[1]
activeCoincsPath = dataset+'/'+dataset+'_spBUOut.txt'
encodingsPath = dataset+'/'+dataset+'_sensorBUOut.txt'
activeCoincsFile=open(activeCoincsPath, 'r')
encodingsFile=open(encodingsPath, 'r')
analyzeOverlaps(activeCoincsFile, encodingsFile, dataset)
| agpl-3.0 |
imochaz/epilepsy-system | seizure prediction code/module/sae_feature/predict.py | 1 | 23289 | import os
import numpy as np
import pandas as pd
from pylearn2.utils import serial
from theano import tensor as T
from theano import function
from scipy.io import loadmat
from sklearn.metrics import confusion_matrix
from data_extraction.epilepsiae import EpilepsiaeFeatureLoaderSAE
from feature_extraction.base import FeatureList
from utils.index_helper import get_list_con_seq_idx
from utils.common_params import Params as params
def predict(model, dataset, batch_size=10):
# Use smallish batches to avoid running out of memory
model.set_batch_size(batch_size)
print "Setting up symbolic expressions..."
X = model.get_input_space().make_theano_batch()
Y = model.fprop(X)
Y = T.argmax(Y, axis=1)
f = function([X], Y)
# Dataset must be multiple of batch size.
m = dataset.X.shape[0]
extra = (batch_size - m) % batch_size
assert (m + extra) % batch_size == 0
if extra > 0:
dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]),
dtype=dataset.X.dtype)),
axis=0)
assert dataset.X.shape[0] % batch_size == 0
# Prediction
print "Performing predictions..."
y = []
for i in xrange(dataset.X.shape[0] / batch_size):
x_arg = dataset.X[i * batch_size:(i + 1) * batch_size, :]
if X.ndim > 2:
x_arg = dataset.get_topological_view(x_arg)
y.append(f(x_arg.astype(X.dtype)))
y = np.concatenate(y)
assert y.ndim == 1
assert y.shape[0] == dataset.X.shape[0]
# Discard any zero-padding that was used to give the batches uniform size
y = y[:m]
return y
def get_prediction_performance(y_hat,
y,
preictal_labels,
ictal_labels,
leave_out_seizure_idx_valid,
leave_out_seizure_idx_test,
figure_dir,
thd_firing_pow,
preictal_sec,
segment_sec,
use_available_preictal):
y_true = np.where(y)[0]
y_preictal_withheld = np.where(preictal_labels == 1)[0]
y_preictal_select = np.where(preictal_labels == 2)[0]
y_preictal_remove = np.where(preictal_labels == 3)[0]
y_ictal = np.where(ictal_labels)[0]
assert np.all(y_true == y_preictal_withheld)
print 'n_preictal_segments: ' + str(y_true.size)
list_con_idx_y_true = get_list_con_seq_idx(y_true)
list_con_idx_y_preictal_select = get_list_con_seq_idx(y_preictal_select)
list_con_idx_y_preictal_remove = get_list_con_seq_idx(y_preictal_remove)
list_con_idx_y_ictal = get_list_con_seq_idx(y_ictal)
# Get prediction results with decision making
if use_available_preictal:
n_preictal_samples = y_true.size * 1.0
else:
n_preictal_samples = preictal_sec / (segment_sec * 1.0)
firing_pow = np.zeros(y_hat.size, dtype=float)
for i in np.arange(y_hat.size):
start_idx = (i + 1) - n_preictal_samples
end_idx = i + 1
if start_idx < 0:
start_idx = 0
firing_pow[i] = np.sum(y_hat[start_idx:end_idx]) / n_preictal_samples
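# The loop above computes a trailing moving average ("firing power") of the
# binary SAE outputs:
#   firing_pow[i] = sum(y_hat[max(0, i + 1 - N) : i + 1]) / N,  N = n_preictal_samples
# A vectorized sketch of the same smoothing (assuming N is a positive integer)
# would be:
#   kernel = np.ones(int(n_preictal_samples)) / n_preictal_samples
#   firing_pow = np.convolve(y_hat, kernel)[:y_hat.size]
# which reproduces the partial windows at the start, since the denominator is
# always N.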
# # Further normalize the results from the decision making to [0, 1]
# from sklearn import preprocessing
# min_max_scaler = preprocessing.MinMaxScaler()
# norm_firing_pow = min_max_scaler.fit_transform(firing_pow)
# Limit raising alarms (1)
# Even after a preictal period is passed, further alarm cannot be raised just by noticing firing_pow value above the threshold,
# and the firing_pow must first fall below the threshold level, resetting the normal alarm generation
y_pred = np.where(firing_pow >= thd_firing_pow)[0]
if y_pred.size > 0:
list_con_idx_y_pred = get_list_con_seq_idx(y_pred)
y_pred_limit = np.zeros(y_hat.size, dtype=int)
for i in range(list_con_idx_y_pred.size):
y_pred_limit[y_pred[list_con_idx_y_pred[i][0]]] = 1
all_alarms = np.where(y_pred_limit)[0]
else:
all_alarms = np.empty(0, dtype=int)
# Limit raising alarms (2)
# Generation of more alarms is inhibited for as long as a preictal period
alarms = np.empty(0, dtype=int)
for a_idx, alarm in enumerate(all_alarms):
if a_idx > 0:
if (alarm - alarms[-1]) > n_preictal_samples:
alarms = np.append(alarms, alarm)
else:
alarms = np.append(alarms, alarm)
inhibited_alarms = np.setdiff1d(all_alarms, alarms)
assert np.all(np.union1d(alarms, inhibited_alarms) == all_alarms)
# Remove alarms triggered by preictal periods of seizures that are not being
# considered (selected/removed), so they are not counted as false positives
rm_select_sz_alarms = np.intersect1d(alarms, y_preictal_select)
rm_remove_sz_alarms = np.intersect1d(alarms, y_preictal_remove)
alarms = np.setdiff1d(alarms, y_preictal_select)
alarms = np.setdiff1d(alarms, y_preictal_remove)
# Compute confusion matrix
cm = confusion_matrix(y, y_hat)
np.set_printoptions(precision=2)
print 'Confusion matrix, without normalization'
print cm
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print 'Normalized confusion matrix'
print cm_normalized
# After applying the decision making
tp = np.intersect1d(alarms, y_true)
fp = np.setdiff1d(alarms, y_true)
if tp.size > 0:
time_before_ictal = ((y_true[-1] + 1) - tp[0]) * (segment_sec * 1.0)
else:
time_before_ictal = -1.0
detected_sz = 0
if tp.size > 0:
detected_sz = 1
result = pd.DataFrame({
'n_seizures': 1, # Number of withheld seizures (i.e., 1 for LOOCV)
'detected_sz': detected_sz, # Number of detected seizures
'fp': fp.size, # Number of false alarms (i.e., false positives)
't_before_sz': time_before_ictal # Seconds before the onset of the withheld seizures
}, index=[0])
print ''
print result
print ''
######################################################################
# Plot prediction results
######################################################################
import matplotlib.pyplot as plt
fig_dpi = 80
fig_width = 1500/fig_dpi
fig_height = 800/fig_dpi
fig_save_dpi = 200
plt.figure(figsize=(fig_width, fig_height), dpi=fig_dpi)
# Highlight selected preictal periods
with_label = True
for i in range(list_con_idx_y_preictal_select.size):
# Index of preictal period
start_highlight_idx = y_preictal_select[list_con_idx_y_preictal_select[i]][0]
end_highlight_idx = y_preictal_select[list_con_idx_y_preictal_select[i]][-1]
if with_label:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='c',
alpha=0.1, edgecolor='none',
label='Preictal (selected)')
with_label = False
else:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='c',
alpha=0.1, edgecolor='none')
# Highlight removed preictal periods
with_label = True
for i in range(list_con_idx_y_preictal_remove.size):
# Index of preictal period
start_highlight_idx = y_preictal_remove[list_con_idx_y_preictal_remove[i]][0]
end_highlight_idx = y_preictal_remove[list_con_idx_y_preictal_remove[i]][-1]
if with_label:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='k',
alpha=0.1, edgecolor='none',
label='Preictal (removed)')
with_label = False
else:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='k',
alpha=0.1, edgecolor='none')
# Highlight the withheld preictal periods
with_label = True
for i in range(list_con_idx_y_true.size):
# Index of preictal period
start_highlight_idx = y_true[list_con_idx_y_true[i]][0]
end_highlight_idx = y_true[list_con_idx_y_true[i]][-1]
if with_label:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='orange',
alpha=0.3, edgecolor='none',
label='Preictal')
with_label = False
else:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='orange',
alpha=0.3, edgecolor='none')
# Highlight ictal periods and add ictal onset
with_label = True
for i in range(list_con_idx_y_ictal.size):
# Index of ical period
start_highlight_idx = y_ictal[list_con_idx_y_ictal[i]][0]
end_highlight_idx = y_ictal[list_con_idx_y_ictal[i]][-1]
if with_label:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='r',
alpha=0.3, edgecolor='none',
label='Ictal')
plt.axvline(start_highlight_idx, ymin=-0.2, ymax=1.15/1.2, color='k',
linestyle='-', linewidth=2.5,
marker='x', markersize=8, markeredgewidth=2.5,
label='Ictal onset')
with_label = False
else:
plt.axvspan(start_highlight_idx, end_highlight_idx + 1, ymax=1.15/1.2, color='r',
alpha=0.3, edgecolor='none')
plt.axvline(start_highlight_idx, ymin=-0.2, ymax=1.15/1.2, color='k',
linestyle='-', linewidth=2.5,
marker='x', markersize=8, markeredgewidth=2.5)
# Alarm
with_label = True
for a in alarms:
if with_label:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='r', linewidth=2.5,
marker='^', markeredgecolor='r', markersize=8, markeredgewidth=2.5,
label='Alarm')
with_label = False
# To avoid having multiple 'Alarm' legends
else:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='r', linewidth=2.5,
marker='^', markeredgecolor='r', markersize=8, markeredgewidth=2.5)
# Inhibited near alarm
with_label = True
for a in inhibited_alarms:
if with_label:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='r', linewidth=2.5,
marker='^', markeredgecolor='r', markersize=8, markeredgewidth=2.5,
label='Alarm (inhibited)', alpha=0.3)
with_label = False
# To avoid having multiple 'Alarm' legends
else:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='r', linewidth=2.5,
marker='^', markeredgecolor='r', markersize=8, markeredgewidth=2.5,
alpha=0.3)
# Selected seizure alarm
with_label = True
for a in rm_select_sz_alarms:
if with_label:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='c', linewidth=2.5,
marker='^', markeredgecolor='c', markersize=8, markeredgewidth=2.5,
label='Alarm (selected)', alpha=0.3)
with_label = False
# To avoid having multiple 'Alarm' legends
else:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='c', linewidth=2.5,
marker='^', markeredgecolor='c', markersize=8, markeredgewidth=2.5,
alpha=0.3)
# Removed seizure alarm
with_label = True
for a in rm_remove_sz_alarms:
if with_label:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='k', linewidth=2.5,
marker='^', markeredgecolor='k', markersize=8, markeredgewidth=2.5,
label='Alarm (removed)', alpha=0.3)
with_label = False
# To avoid having multiple 'Alarm' legends
else:
plt.axvline(a, ymin=-0.2, ymax=1.15/1.2,
linestyle='-', color='k', linewidth=2.5,
marker='^', markeredgecolor='k', markersize=8, markeredgewidth=2.5,
alpha=0.3)
plt.axhline(thd_firing_pow, color='green', linestyle='--', label='Threshold')
plt.plot(range(y_hat.size), y_hat, 'k', alpha=0.4, label='SAE output')
plt.plot(range(firing_pow.size), firing_pow, 'b', linewidth=2, label='FP output')
leg = plt.legend(ncol=3)
leg.get_frame().set_alpha(0.7)
plt.ylim(0, 1.2)
plt.yticks([0, thd_firing_pow, 1])
plt.xlabel('Samples', fontsize=18)
plt.ylabel('Firing Power (FP)', fontsize=18)
plt.title('Threshold=' + str(thd_firing_pow) + ', Fold-CV=' + str(leave_out_seizure_idx_valid) +
', Fold-test=' + str(leave_out_seizure_idx_test), fontsize=20)
plt.tight_layout()
plt.xlim(xmin=0, xmax=y_hat.size)
plt.savefig(os.path.join(figure_dir, 'prediction_thd_' + str(thd_firing_pow) +
'_fold_' + str(leave_out_seizure_idx_valid) + '_' +
str(leave_out_seizure_idx_test) + '.png'),
dpi=fig_save_dpi)
######################################################################
plt.close('all')
return result
def predict_loocv(patient_id, module_dir, data_dir,
list_thd_firing_pow, preictal_sec,
list_features, n_selected_features,
use_all_nonictals, use_available_preictal_period):
# Get ictal (or seizure) information
metadata_fname = os.path.join(os.path.join(data_dir, patient_id), 'trainset_' + str(preictal_sec) + '.mat')
metadata_mat = loadmat(metadata_fname)
n_seizures = metadata_mat.get('ictals').size
segment_sec = metadata_mat.get('segment_sec')[0][0]
n_extended_blocks_test = metadata_mat.get('n_extended_blocks_test')[0][0]
n_hours_each_fold = 1.0 + (2 * n_extended_blocks_test) # in hours
print 'Use a model in ' + module_dir
print 'Preictal seconds: ' + str(preictal_sec)
print 'Use all nonictal data: ' + str(use_all_nonictals) + ' (this will not affect for ''valid'' and ''test'' sets)'
print 'Use only available preictal period in the smooth the SAE output: ' + str(use_available_preictal_period)
print 'List of threshold:', list_thd_firing_pow
thd_results = []
test_results = np.empty(list_thd_firing_pow.size, dtype=object)
for thd_idx, thd_firing_pow in enumerate(list_thd_firing_pow):
print ''
print '-------------================ Threshold ' + str(thd_firing_pow) + ' ================-------------'
t_results = []
list_leave_out_seizure_idx_test = np.arange(n_seizures)
valid_results = np.empty(list_leave_out_seizure_idx_test.size, dtype=object)
for leave_out_seizure_idx_test in list_leave_out_seizure_idx_test:
v_results = []
# list_leave_out_seizure_idx_valid = np.setdiff1d(np.arange(n_seizures), leave_out_seizure_idx_test)
list_leave_out_seizure_idx_valid = np.asarray([leave_out_seizure_idx_test])
for leave_out_seizure_idx_valid in list_leave_out_seizure_idx_valid:
save_model_dir = os.path.join(module_dir, patient_id + '/models_' + str(preictal_sec) + '_' +
str(leave_out_seizure_idx_valid) + '_' +
str(leave_out_seizure_idx_test))
model_path = os.path.join(save_model_dir, 'sae.pkl')
# Get data set for each fold
dataset = EpilepsiaeFeatureLoaderSAE(patient_id=patient_id,
which_set='valid',
list_features=list_features,
leave_out_seizure_idx_valid=leave_out_seizure_idx_valid,
leave_out_seizure_idx_test=leave_out_seizure_idx_test,
data_dir=data_dir,
preictal_sec=preictal_sec,
use_all_nonictals=use_all_nonictals,
n_selected_features=n_selected_features,
preprocessor_dir=save_model_dir)
# Load model
model = serial.load(model_path)
# Get prediction
y_hat = predict(model, dataset)
# Get ground truth
y = np.argmax(dataset.y, axis=1)
# Get all preictal labels (this might contain other preictal data due to the extended blocks contain seizures)
preictal_labels = dataset.y_label_all
# Get ictal (or seizure) labels
ictal_labels = dataset.ictal_labels
# Get prediction performance
result = get_prediction_performance(y_hat=y_hat,
y=y,
preictal_labels=preictal_labels,
ictal_labels=ictal_labels,
leave_out_seizure_idx_valid=leave_out_seizure_idx_valid,
leave_out_seizure_idx_test=leave_out_seizure_idx_test,
figure_dir=module_dir + '/' + patient_id,
thd_firing_pow=thd_firing_pow,
preictal_sec=preictal_sec,
segment_sec=segment_sec,
use_available_preictal=use_available_preictal_period)
result.loc[:,'sz_fname'] = pd.Series(metadata_mat['ictals'][leave_out_seizure_idx_valid][0]['filename'][0][0][0][0][0],
index=result.index)
v_results.append(result)
valid_results[leave_out_seizure_idx_test] = pd.concat(v_results)
# Get the statistics of the results
n_seizures = np.sum(valid_results[leave_out_seizure_idx_test]['n_seizures'].values)
n_detected_sz = np.sum(valid_results[leave_out_seizure_idx_test]['detected_sz'].values)
n_fp = np.sum(valid_results[leave_out_seizure_idx_test]['fp'].values)
t_before_sz = np.sum(valid_results[leave_out_seizure_idx_test]['t_before_sz'][
valid_results[leave_out_seizure_idx_test]['t_before_sz'] >= 0])
sz_fname = metadata_mat['ictals'][leave_out_seizure_idx_test][0]['filename'][0][0][0][0][0]
result = pd.DataFrame({
'n_seizures': n_seizures,
'n_detected_sz': n_detected_sz,
'n_fp': n_fp,
't_before_sz': t_before_sz,
'n_hour': (n_seizures * n_hours_each_fold),
'leave_out_sz': leave_out_seizure_idx_test,
'sz_fname': sz_fname
}, index=[0])
t_results.append(result)
test_results[thd_idx] = pd.concat(t_results)
# Get the statistics of the results
n_seizures = np.sum(test_results[thd_idx]['n_seizures'].values)
n_detected_sz = np.sum(test_results[thd_idx]['n_detected_sz'].values)
n_fp = np.sum(test_results[thd_idx]['n_fp'].values)
t_before_sz = np.sum(test_results[thd_idx]['t_before_sz'][test_results[thd_idx]['t_before_sz'] >= 0])
sensitivity = (n_detected_sz * 1.0) / n_seizures
fpr = (n_fp * 1.0) / (n_seizures * n_hours_each_fold)
if n_detected_sz > 0:
avg_t_before_sz = (t_before_sz * 1.0) / n_detected_sz
else:
avg_t_before_sz = -1.0
t_before_sz = -1.0
result = pd.DataFrame({
'n_seizures': n_seizures,
'n_detected_sz': n_detected_sz,
'n_fp': n_fp,
't_before_sz': t_before_sz,
'sensitivity': sensitivity,
'fpr': fpr,
'avg_t_before_sz': avg_t_before_sz,
'n_hour': (n_seizures * n_hours_each_fold),
'threshold': thd_firing_pow
}, index=[0])
thd_results.append(result)
summary = pd.concat(thd_results)
print ''
print ''
print '-------------================ Summary ================-------------'
print ''
for thd_idx, thd_firing_pow in enumerate(list_thd_firing_pow):
print 'Result for threshold=' + str(thd_firing_pow) + ':'
print test_results[thd_idx]
print ''
print ''
print summary
def main():
patient_id = 'pat_102'
data_dir = params.DATA_DIR
n_layers = 2
module_dir = os.path.join(params.MODULE_DIR, 'sae_feature/sae_' + str(n_layers) + '_layer')
n_selected_features = -1 # Don't use feature selection
preictal_sec = 40 * 60
use_all_nonictals = True
use_available_preictal_period = True
list_thd_firing_pow = np.asarray([0.1, 0.2, 0.3, 0.4, 0.5])
predict_loocv(patient_id=patient_id,
module_dir=module_dir,
data_dir=data_dir,
list_thd_firing_pow=list_thd_firing_pow,
preictal_sec=preictal_sec,
list_features=[
FeatureList.HRV_IBI_MEAN,
FeatureList.HRV_IBI_SDNN,
FeatureList.HRV_IBI_RMSSD,
FeatureList.HRV_pVLF,
FeatureList.HRV_pLF,
FeatureList.HRV_pHF,
FeatureList.HRV_LFHF,
FeatureList.EEG_RSP_NORM_SPEC_POW,
FeatureList.EEG_RSP_SMOOTH_RS_NORM,
FeatureList.EEG_PHASE_ENTROPY,
FeatureList.EEG_ECG_PHASE_ENTROPY,
FeatureList.EEG_IBI_PHASE_ENTROPY,
FeatureList.EEG_POWER_ECG_PHASE,
FeatureList.EEG_POWER_IBI_PHASE,
FeatureList.EEG_PHASE_ECG_POWER,
FeatureList.EEG_PHASE_IBI_POWER,
FeatureList.EEG_POWER_ECG_POWER,
FeatureList.EEG_POWER_IBI_POWER
],
n_selected_features=n_selected_features,
use_all_nonictals=use_all_nonictals,
use_available_preictal_period=use_available_preictal_period)
if __name__ == '__main__':
main()
| bsd-3-clause |
peterk87/sistr_cmd | tests/test_writer_util.py | 1 | 3236 | from sistr.src.writers import to_dict, flatten_dict
import numpy as np
import pandas as pd
class Test1:
a = 1
b = 3.2
c = 'abc'
z = False
def __init__(self):
self.i = 9
self.j = 0.56
self.k = 'test'
self.l = [1, 2, 3]
self.d = {'a':1, 'b':2}
class Test2:
q = 6
w = True
e = 5.678
r = [8.9, 10.1, 2.3, 4.5]
def __init__(self, test1):
self.test1 = test1
a = 6
b = 7.8
c = 'string'
d = {'key1': 'val1', 'key2': 'val2'}
l = ['a', 'b', 'c']
def test_complex_obj_to_dict():
t1 = Test1()
t2 = Test2(t1)
t2_dict = to_dict(t2, 0)
assert isinstance(t2_dict, dict)
exp_t2_dict = {'q': 6,
'test1': {'a': 1,
'c': 'abc',
'b': 3.2,
'd': {'a': 1,
'b': 2},
'i': 9,
'k': 'test',
'j': 0.56,
'l': [1, 2, 3],
'z': False},
'r': [8.9, 10.1, 2.3, 4.5],
'e': 5.678,
'w': True}
assert t2_dict == exp_t2_dict
exp_t2_dict_depth2 = {'q': 6,
'test1': {'a': 1,
'c': 'abc',
'b': 3.2,
'i': 9,
'k': 'test',
'j': 0.56,
'z': False},
'r': [8.9, 10.1, 2.3, 4.5],
'e': 5.678,
'w': True}
t2_dict_depth2 = to_dict(t2, 0, depth_threshold=2)
assert t2_dict_depth2 == exp_t2_dict_depth2
exp_t2_dict = {
'r': [8.9, 10.1, 2.3, 4.5],
'e': 5.678,
'w': True}
t2_dict = to_dict(t2, 0, exclude_keys={'test1', 'q'})
assert t2_dict == exp_t2_dict
exp_t2_dict = {'q': 6,
'test1': {
'c': 'abc',
'b': 3.2,
'd': {
'b': 2},
'i': 9,
'k': 'test',
'j': 0.56,
'l': [1, 2, 3],
'z': False},
'r': [8.9, 10.1, 2.3, 4.5],
'e': 5.678,
'w': True}
t2_dict = to_dict(t2, 0, exclude_keys={'a'})
assert t2_dict == exp_t2_dict
def test_numpy_numbers_to_dict():
import json
class ABC:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
t = [{'a': 1.90,
'b': 7.8,
'c': 99},
{'a': 1.90,
'b': 80.6,
'c': 9999},
{'a': 13.90,
'b': 78.5,
'c': 999},]
df = pd.DataFrame(t)
for i, r in df.iterrows():
c = r.c
c = c.astype(np.int64)
abc = ABC(r.a, r.b, c)
assert json.dumps(to_dict(abc, 0), sort_keys=True) == json.dumps(t[i], sort_keys=True)
| apache-2.0 |
rs2/pandas | pandas/core/window/expanding.py | 1 | 7152 | from textwrap import dedent
from typing import Dict, Optional
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, doc
from pandas.core.window.common import WindowGroupByMixin, _doc_template, _shared_docs
from pandas.core.window.rolling import RollingAndExpandingMixin
class Expanding(RollingAndExpandingMixin):
"""
Provide expanding transformations.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=None, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None, **kwargs):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
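# Note on the value returned above (a reading of the code, not a change in
# behaviour): for an expanding window the "window length" is simply the full
# length of the data along the chosen axis (doubled when a second object is
# involved, e.g. for cov/corr), bounded below by min_periods.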
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.aggregate : Similar DataFrame method.
pandas.Series.aggregate : Similar Series method.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})
>>> df
A B C
0 1 4 7
1 2 5 8
2 3 6 9
>>> df.ewm(alpha=0.5).mean()
A B C
0 1.000000 4.000000 7.000000
1 1.666667 4.666667 7.666667
2 2.428571 5.428571 8.428571
"""
)
@doc(
_shared_docs["aggregate"],
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
klass="Series/Dataframe",
axis="",
)
def aggregate(self, func, *args, **kwargs):
return super().aggregate(func, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(
self,
func,
raw: bool = False,
engine: Optional[str] = None,
engine_kwargs: Optional[Dict[str, bool]] = None,
args=None,
kwargs=None,
):
return super().apply(
func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding", func_name="max")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding", versionadded="")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding", versionadded="")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="expanding", func_name="skew")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show an expanding calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> import scipy.stats
>>> print(f"{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}")
-1.200000
>>> print(f"{scipy.stats.kurtosis(arr, bias=False):.6f}")
4.999874
>>> s = pd.Series(arr)
>>> s.expanding(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 4.999874
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="expanding")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="expanding", func_name="cov")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class ExpandingGroupby(WindowGroupByMixin, Expanding):
"""
    Provide an expanding groupby implementation.
"""
@property
def _constructor(self):
return Expanding
| bsd-3-clause |
openturns/otlhs | validation/validate_SA_big.py | 1 | 2458 | #! /usr/bin/env python
import openturns as ot
import otlhs
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from openturns.viewer import View
import time
ot.RandomGenerator.SetSeed(0)
ot.Log.Show(ot.Log.INFO)
# Bounds are [0,1]^dimension
dimension = 50
# Size of sample
size = 100
# Factory: generates randomized LHS designs
lhsDesign = ot.LHSExperiment(ot.ComposedDistribution([ot.Uniform(0.0, 1.0)] * dimension), size)
lhsDesign.setAlwaysShuffle(True) # randomized
geomProfile = otlhs.GeometricProfile(10.0, 0.999, 50000)
c2 = otlhs.SpaceFillingC2()
sa = otlhs.SimulatedAnnealingLHS(lhsDesign, geomProfile, c2)
tic = time.time()
design = sa.generate()
result = sa.getResult()
toc = time.time()
dt1 = toc-tic
print("time=%f"%dt1)
print("dimension=%d, size=%d,sa=%s"%(dimension, size, sa))
print(str(result.getOptimalValue())+" c2="+str(result.getC2())+" phiP="+str(result.getPhiP())+" minDist="+str(result.getMinDist()))
crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()
pp = PdfPages('large_OTLHS.pdf')
# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("otlhs_c2_crit_big.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_c2_proba_big.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)
minDist = otlhs.SpaceFillingMinDist()
sa = otlhs.SimulatedAnnealingLHS(lhsDesign, geomProfile, minDist)
tic = time.time()
design = sa.generate()
result = sa.getResult()
toc = time.time()
dt2 = toc-tic
print("time=%f"%dt2)
print("dimension=%d, size=%d,sa=%s"%(dimension, size, sa))
print(str(result.getOptimalValue())+" c2="+str(result.getC2())+" phiP="+str(result.getPhiP())+" minDist="+str(result.getMinDist()))
crit = result.drawHistoryCriterion()
proba = result.drawHistoryProbability()
temp = result.drawHistoryTemperature()
# Criterion
fig = View(crit, plot_kwargs={'color':'blue'}).getFigure()
fig.savefig("otlhs_mindist_crit_big.png")
pp.savefig(fig)
plt.close(fig)
# Proba
fig = View(proba, plot_kwargs={'marker': 'o', 'ms': 0.6}, axes_kwargs={'ylim': [-0.05, 1.05]}).getFigure()
fig.savefig("lhs_mindist_proba_big.png")
pp.savefig(fig)
plt.close(fig)
# Temperature
fig = View(temp).getFigure()
pp.savefig(fig)
plt.close(fig)
pp.close()
| gpl-3.0 |
sinhrks/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
eickenberg/sklearn-theano | examples/plot_asirra_dataset.py | 9 | 1613 | """
===============================================
Asirra dataset classification using transformer
===============================================
This example shows a basic use of the OverfeatTransformer in a scikit-learn
pipeline in order to do classification of natural images.
In this case, the images come from the Asirra dataset functionality built into
sklearn-theano. Plots show one example of each class (cats and dogs).
"""
print(__doc__)
from sklearn_theano.datasets import fetch_asirra
from sklearn_theano.feature_extraction import OverfeatTransformer
from sklearn_theano.utils import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
import time
asirra = fetch_asirra(image_count=20)
X = asirra.images.astype('float32')
y = asirra.target
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=.6, random_state=1999)
tf = OverfeatTransformer(output_layers=[-3])
clf = LogisticRegression()
pipe = make_pipeline(tf, clf)
t0 = time.time()
pipe.fit(X_train, y_train)
print("Total transform time")
print("====================")
print(time.time() - t0)
print()
y_pred = pipe.predict(X_test)
print(classification_report(y_test, y_pred))
print()
print("Accuracy score")
print("==============")
print(accuracy_score(y_test, y_pred))
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(asirra.images[asirra.target == 0][-1])
axarr[0].axis('off')
axarr[1].imshow(asirra.images[asirra.target == 1][0])
axarr[1].axis('off')
plt.show()
| bsd-3-clause |
facemelters/data-science | pybook.py | 1 | 9915 | import pandas as pd
import numpy as np
import requests
import json
import gspread
import datetime
from time import sleep
from oauth2client.service_account import ServiceAccountCredentials
#Facebook Login and Parameter Setting via Command Line
json_fb_key = json.load(open('./Credentials/fb_api_key.json'))
apikey = json_fb_key['credentials']['apikey'].encode('ascii','ignore')
pageID = str(raw_input('Page ID? '))
num_posts = int(raw_input('How many posts would you like to retrieve? '))
#Google Spreadsheets Login
json_key = './Credentials/'+ pageID + ' Update Client Secret.json'
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name(json_key,scopes=scope)
sheet_key = json.load(open('./Credentials/Facebook Sheet ID.json'))
sheet_key = sheet_key[pageID]
gc = gspread.authorize(credentials)
ws = gc.open_by_key(sheet_key).sheet1
def get_feed(pageID,num_posts):
pages = num_posts // 25
id_list = []
count = 0
endpoint = 'https://graph.facebook.com/v2.7/'+pageID+'/posts?access_token='+apikey
while count <= pages:
response = requests.get(endpoint)
fb_data = response.json()
for item in fb_data['data']:
id_list.append(item['id'])
try:
endpoint = fb_data['paging']['next']
count += 1
except KeyError:
break
return id_list
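# Added note: get_feed pages through the Graph API feed by following
# fb_data['paging']['next'], collecting roughly 25 post IDs per page
# until num_posts is covered, and returns the list of post IDs.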
def getFBdataReach(apikey,postid):
tries = 5
while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'/insights/post_impressions_unique?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataLinkClicks(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'/insights/post_consumptions_by_type?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataComments(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'/comments?summary=true&access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBdataLikes(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'/likes?summary=true&access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def getFBfanReach(apikey,postid):
tries = 5
    while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'/insights/post_impressions_fan_unique?access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def get_stats_group(apikey,postid):
tries = 5
while tries >= 0:
try:
endpoint = 'https://graph.facebook.com/v2.7/'+postid+'?fields=link,message,description,shares,created_time,name,type&access_token='+apikey
response = requests.get(endpoint)
fb_data = response.json()
return fb_data
except:
if tries == 0:
break
else:
sleep(3)
tries -= 1
continue
def build_sheet():
"""Calls all of the stats gathering funcs, builds lists, and then builds DataFrame"""
container = []
all_fans = get_fan_count()
reach = []
link = []
message = []
description = []
headline = []
shares = []
created_time = []
clicks = []
comments = []
likes = []
types = []
fan_impressions = []
counter = 0
post_id_list = get_feed(pageID,num_posts)
for item in post_id_list:
try:
reach.append(getFBdataReach(apikey,item)['data'][0]['values'][0]['value'])
except:
reach.append(0)
try:
clicks.append(getFBdataLinkClicks(apikey,item)['data'][0]['values'][0]['value']['link clicks'])
except:
clicks.append(0)
try:
likes.append(getFBdataLikes(apikey,item)['summary']['total_count'])
except:
likes.append(0)
try:
comments.append(getFBdataComments(apikey,item)['summary']['total_count'])
except:
comments.append(0)
try:
fan_impressions.append(getFBfanReach(apikey,item)['data'][0]['values'][0]['value'])
except:
fan_impressions.append(0)
try:
container.append(get_stats_group(apikey,item)) #to prevent excessive API calls
except:
continue
print counter, " of", len(post_id_list)
counter += 1
for item in range(len(container)):
try:
link.append(container[item]['link'])
except:
link.append('N/A')
try:
message.append(container[item]['message'])
except:
message.append('N/A')
try:
description.append(container[item]['description'])
except:
description.append('N/A')
try:
headline.append(container[item]['name'])
except:
headline.append('N/A')
try:
shares.append(container[item]['shares']['count'])
except:
shares.append(0)
try:
created_time.append(container[item]['created_time'])
except:
created_time.append(0)
try:
types.append(container[item]['type'])
except:
types.append(0)
insights_df = pd.DataFrame(data = {'Post_ID':post_id_list,'Posted':created_time,'Share Copy':message,'Headline':headline,'Description':description,'Type':types,'Reach':reach,'Clicks':clicks,'Shares':shares,'Comments':comments,'Likes':likes,'Link':link,'Fan_Reach':fan_impressions})
# insights_df['CTR'] = np.round((insights_df['Clicks'] / insights_df['Reach'])*100,2)
insights_df['Post Share Rate'] = insights_df['Shares'] / insights_df['Reach']
insights_df['Posted'] = insights_df['Posted'].apply(lambda x: str(x).split('T')[0])
def set_fans():
fan_holder = []
for x in insights_df["Posted"]:
try:
fan_holder.append(all_fans[x])
except KeyError:
fan_holder.append("n/a")
fan_holder = pd.Series(data=fan_holder)
return fan_holder
insights_df['Total_Fans'] = set_fans()
#I want a Series where the value in the Posted Series matches the key in the dict
return insights_df
#Write a function that grabs the daily total likes of the page, stores the date as
#just the year-month-day as key with fans as value. Then, use created_time to query
#the dictionary and write the corresponding value to insights_df.
def get_fan_count():
fan_count_dict = {}
endpoint = "https://graph.facebook.com/v2.7/"+pageID+"/insights/page_fans/?since=2014-9-22"+"&access_token="+apikey
response = requests.get(endpoint)
print response
data = response.json()
print data
for item in data["data"][0]["values"]:
k = item["end_time"].split("T")[0]
v = item["value"]
fan_count_dict[k] = v
# def fix_date(pubdate):
# #Take a dictionary and applies a split operation elementwise
# return {v: k.split('T')[0] for k,v in pubdate.items()}
return fan_count_dict
def numberToLetters(q):
q = q - 1
result = ''
while q >= 0:
remain = q % 26
result = chr(remain+65) + result;
q = q//26 - 1
return result
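# Added note: numberToLetters maps a 1-based column index to its spreadsheet
# letter label, e.g. 1 -> 'A', 26 -> 'Z', 27 -> 'AA', 28 -> 'AB'.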
def main():
df = build_sheet()
df.to_csv(pageID+'-FB-'+str(datetime.datetime.today().month)+'-'+str(datetime.datetime.today().day)+'.csv',encoding='utf-8')
# Let's send the column names of dataframe to Google
columns = df.columns.values.tolist()
#Login to Google Spreadsheets (Doing this inside func b/c of timeout)
gc = gspread.authorize(credentials)
ws = gc.open_by_key(sheet_key).sheet1
# selection of the range that will be updated
cell_list = ws.range('A1:'+numberToLetters(len(columns))+'1')
# modifying the values in the range
for cell in cell_list:
val = columns[cell.col-1]
if type(val) is str:
val = val.decode('utf-8')
cell.value = val
# update in batch
ws.update_cells(cell_list)
#Now let's send the dataframe via the apikey
# number of lines and columns
num_lines, num_columns = df.shape
# selection of the range that will be updated
cell_list = ws.range('A2:'+numberToLetters(num_columns)+str(num_lines+1))
# modifying the values in the range
for cell in cell_list:
val = df.iloc[cell.row-2,cell.col-1]
if type(val) is str:
val = val.decode('utf-8')
cell.value = val
# update in batch
ws.update_cells(cell_list)
if __name__ == '__main__':
main()
| gpl-2.0 |
HyukjinKwon/spark | python/pyspark/pandas/missing/frame.py | 16 | 4578 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import pandas as pd
from pyspark.pandas.missing import unsupported_function, unsupported_property, common
def _unsupported_function(method_name, deprecated=False, reason=""):
return unsupported_function(
class_name="pd.DataFrame", method_name=method_name, deprecated=deprecated, reason=reason
)
def _unsupported_property(property_name, deprecated=False, reason=""):
return unsupported_property(
class_name="pd.DataFrame", property_name=property_name, deprecated=deprecated, reason=reason
)
class _MissingPandasLikeDataFrame(object):
# Functions
asfreq = _unsupported_function("asfreq")
asof = _unsupported_function("asof")
boxplot = _unsupported_function("boxplot")
combine = _unsupported_function("combine")
combine_first = _unsupported_function("combine_first")
compare = _unsupported_function("compare")
convert_dtypes = _unsupported_function("convert_dtypes")
corrwith = _unsupported_function("corrwith")
cov = _unsupported_function("cov")
ewm = _unsupported_function("ewm")
infer_objects = _unsupported_function("infer_objects")
interpolate = _unsupported_function("interpolate")
lookup = _unsupported_function("lookup")
mode = _unsupported_function("mode")
reorder_levels = _unsupported_function("reorder_levels")
resample = _unsupported_function("resample")
set_axis = _unsupported_function("set_axis")
slice_shift = _unsupported_function("slice_shift")
to_feather = _unsupported_function("to_feather")
to_gbq = _unsupported_function("to_gbq")
to_hdf = _unsupported_function("to_hdf")
to_period = _unsupported_function("to_period")
to_sql = _unsupported_function("to_sql")
to_stata = _unsupported_function("to_stata")
to_timestamp = _unsupported_function("to_timestamp")
tshift = _unsupported_function("tshift")
tz_convert = _unsupported_function("tz_convert")
tz_localize = _unsupported_function("tz_localize")
# Deprecated functions
convert_objects = _unsupported_function("convert_objects", deprecated=True)
select = _unsupported_function("select", deprecated=True)
to_panel = _unsupported_function("to_panel", deprecated=True)
get_values = _unsupported_function("get_values", deprecated=True)
compound = _unsupported_function("compound", deprecated=True)
reindex_axis = _unsupported_function("reindex_axis", deprecated=True)
# Functions we won't support.
to_pickle = common.to_pickle(_unsupported_function)
memory_usage = common.memory_usage(_unsupported_function)
to_xarray = common.to_xarray(_unsupported_function)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
# Deprecated properties
blocks = _unsupported_property("blocks", deprecated=True)
ftypes = _unsupported_property("ftypes", deprecated=True)
is_copy = _unsupported_property("is_copy", deprecated=True)
ix = _unsupported_property("ix", deprecated=True)
# Deprecated functions
as_blocks = _unsupported_function("as_blocks", deprecated=True)
as_matrix = _unsupported_function("as_matrix", deprecated=True)
clip_lower = _unsupported_function("clip_lower", deprecated=True)
clip_upper = _unsupported_function("clip_upper", deprecated=True)
get_ftype_counts = _unsupported_function("get_ftype_counts", deprecated=True)
get_value = _unsupported_function("get_value", deprecated=True)
set_value = _unsupported_function("set_value", deprecated=True)
to_dense = _unsupported_function("to_dense", deprecated=True)
to_sparse = _unsupported_function("to_sparse", deprecated=True)
to_msgpack = _unsupported_function("to_msgpack", deprecated=True)
| apache-2.0 |
jismartin/RedesNegocios | code/leer_datos.py | 1 | 7231 | # -*- coding: utf-8 -*-
"""
@author: nacho
"""
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sqlite3 as lite
import pandas as pd
from os.path import exists
import re
import unicodedata
from sqlalchemy import create_engine
def elimina_tildes(s):
return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
def normaliza_texto1(s):
#s=s.decode('utf8')
s=s.strip()
s=elimina_tildes(s)
s=s.lower()
return re.sub('[\s+]','',s)
def normaliza_texto2(s):
s=elimina_tildes(s)
s=re.sub(';','',s)
s=re.sub(' ','',s)
return s.lower()
def normaliza_texto3(s):
#s=s.decode('utf8')
s=elimina_tildes(s)
return s
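# Illustrative behaviour of the helpers above (added note; values are examples):
#   normaliza_texto1("  Ávila Centro ") -> "avilacentro"   (strip, drop accents, lower, remove whitespace)
#   normaliza_texto2("Ávila; Centro 5") -> "avilacentro5"  (drop accents, drop ';' and spaces, lower)
#   normaliza_texto3("Ávila")           -> "Avila"         (only drops accents)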
def buscar_ciudad(s):
    # Unused/unfinished helper. Note that str.find returns -1 (which is truthy)
    # when the substring is missing, so the membership test has to be explicit.
    if s.find('PALENCIA') != -1:
        return
ciudades=pd.Series(['AVILA','BURGOS','PALENCIA','SALAMANCA','SEGOVIA','VALLADOLID','MADRID','BARCELONA'])
# To collect possible errors
errores = []
# We need a list with the paths to the captured-data csv files
# In a linux shell this is very easy:
# find ./ -name "*.csv" > ficheros.txt
name=input('Localización: ')
ficheros_lista = pd.read_csv(name + '.txt', names=['ruta'])
n_muestras = 0
total_muestras = len(ficheros_lista.ruta)
# Connect to the database
#con = lite.connect(input('Nombre base de datos: '))
#cur = con.cursor()
datos_completos=pd.DataFrame({"clase":[],"subclase":[],"busqueda":[],"nombre":[],"categoria_web":[],"direccion":[],
"cp":[],"ciudad":[],"longitud":[],"latitud":[],"archivo":[]})
n_registros_iniciales=0
n_registros_finales=0
for ruta in ficheros_lista.ruta:
if exists(ruta):
n_muestras = n_muestras + 1
print("Procesando muestra %d de un total de %d" % (n_muestras, total_muestras))
        # Business type number
numeros = [ int(s) for s in re.findall(r'\d+',ruta)]
clase = numeros[0]
        # Business subclass
subclase=re.split('\d+',ruta)[1]
        # Remove characters
subclase=re.sub('_','',subclase)
subclase=re.sub('.csv','',subclase)
subclase=normaliza_texto1(subclase)
        # Read the captured data
datos = pd.read_table(ruta,sep=',',encoding='utf-8',error_bad_lines=True,warn_bad_lines=True,header=0,na_values=[""," "])
datos=datos[datos.columns[:11]]
datos.columns= ["nombre","categoria_web","email","telefono","direccion","cp","ciudad","web","pahttp","longitud","latitud"]
        # drop columns that are not needed
datos.drop('email',axis=1,inplace=True)
datos.drop('telefono',axis=1,inplace=True)
datos.drop('web',axis=1,inplace=True)
datos.drop('pahttp',axis=1,inplace=True)
#datos.drop('x',axis=1,inplace=True)
        # drop records that cannot be processed (no name and address)
n_inicial = len(datos)
n_registros_iniciales+=n_inicial
datos= datos.dropna(subset=['direccion','cp','ciudad','nombre'])
n_final = len(datos)
#n_registros_finales+=n_final
        # fill possibly empty longitude/latitude fields with 0
#datos.fillna(0,inplace=True)
datos[['longitud','latitud']]=datos[['longitud','latitud']].fillna(0)
        # convert to numeric
datos.longitud=pd.to_numeric(datos.longitud,errors='coerce')
datos.latitud=pd.to_numeric(datos.latitud,errors='coerce')
datos.cp=pd.to_numeric(datos.cp,errors='coerce')
datos.longitud.fillna(0,inplace=True)
datos.latitud.fillna(0,inplace=True)
        # normalize text
datos.nombre = [ normaliza_texto2(s) for s in datos.nombre ]
datos.direccion = [ normaliza_texto2(s) for s in datos.direccion ]
datos.ciudad = [ normaliza_texto2(s) for s in datos.ciudad ]
datos.categoria_web = [ normaliza_texto2(s) for s in datos.categoria_web ]
        # errors
n_sin_posicion = len(datos[(datos.longitud==0) & (datos.latitud==0)])
if n_sin_posicion >0:
errores.append(tuple((normaliza_texto3(ruta), ('%i negocios sin geolocalizacion guardados' % n_sin_posicion ))))
if n_final > n_inicial:
errores.append(tuple((normaliza_texto3(ruta), ('%i registros sin posibilidad de tratamiento eliminados' % (n_inicial-n_final) ))))
        # add class and subclass
datos['clase']=clase
datos['subclase']=subclase
datos['archivo']=ruta
datos['busqueda']=ciudades[ [x in ruta.upper() for x in ciudades.values]].values[0]
        # Append to the full dataset
datos_completos = datos_completos.append(datos)
        # # Copy into the database
# cur.executemany("INSERT INTO negocios (clase, subclase, nombre, categoria_web, direccion, cp, ciudad, longitud, latitud)" +
# " VALUES(?,?,?,?,?,?,?,?,?);", [tuple([clase,subclase]) + tuple(x) for x in datos[['nombre', 'categoria_web', 'direccion', 'cp', 'ciudad', 'longitud', 'latitud']].values])
# con.commit()
else:
errores.append(tuple((normaliza_texto3(ruta),'No existe fichero csv')))
#if len(errores) > 0:
# cur.executemany("INSERT INTO errores (ruta, error)" +
# " VALUES(?,?);", [e for e in errores])
# con.commit()
#
# for e in errores:
# print e
# cur.executemany("INSERT INTO errores (ruta, error)" +
# " VALUES(?,?);", [e])
# con.commit()
n_registros_finales=datos_completos.shape[0]
n_registros_sin_tratamiento=n_registros_iniciales - n_registros_finales
# Reset the index
datos_completos.reset_index(drop=True,inplace=True)
# Drop duplicates
datos_completos.drop_duplicates(inplace=True)
n_registros_duplicados=n_registros_finales - datos_completos.shape[0]
errores.append(tuple((name,'Registros (Iniciales %i; Sin tratamiento %i; Duplicados %i; Guardados %i)' %
(n_registros_iniciales,n_registros_sin_tratamiento,n_registros_duplicados,datos_completos.shape[0]))))
# Save as csv (file name based on the location name)
datos_completos.to_csv('datos_' + name +'2.csv')
#datos_completos.drop(['archivo'],axis=1,inplace=True)
# Save as sqlite
engine = create_engine('sqlite:///datos_' + name +'.sqlite')
conn = engine.connect()
datos_completos.to_sql('negocios',con=conn, if_exists='replace',index=False)
# Save as csv
datos_completos.to_csv('datos_' + name +'.csv')
# Save errors as csv
(pd.DataFrame(errores)).to_csv('errores_' + name + '.csv')
#con.close()
# records without geolocation
resumen=[]
for c in datos_completos['clase'].unique():
total = (datos_completos['clase']==c).sum()
sin = ((datos_completos['clase']==c) &
( (datos_completos['latitud']==0) | (datos_completos['longitud']==0))).sum()
resumen.append(list((int(c),sin,sin/total)))
resumen=pd.DataFrame(resumen,columns=['clase','sin_geolocalizacion','sin_geolocalizacion_porcentaje'])
# Save as csv
resumen.to_csv('resumen_' + name +'.csv')
datos_completos[(datos_completos['latitud']==0) | (datos_completos['longitud']==0)].to_csv('sin_geolocalizacion_'+name+'.csv')
| gpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/matplotlib/tests/test_quiver.py | 4 | 2866 | from __future__ import print_function
import os
import tempfile
import numpy as np
import sys
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import cleanup
from matplotlib.testing.decorators import image_comparison
def draw_quiver(ax, **kw):
X, Y = np.meshgrid(np.arange(0, 2 * np.pi, 1),
np.arange(0, 2 * np.pi, 1))
U = np.cos(X)
V = np.sin(Y)
Q = ax.quiver(U, V, **kw)
return Q
@cleanup
def test_quiver_memory_leak():
fig, ax = plt.subplots()
Q = draw_quiver(ax)
ttX = Q.X
Q.remove()
del Q
assert sys.getrefcount(ttX) == 2
@cleanup
def test_quiver_key_memory_leak():
fig, ax = plt.subplots()
Q = draw_quiver(ax)
qk = ax.quiverkey(Q, 0.5, 0.92, 2, r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'})
assert sys.getrefcount(qk) == 3
qk.remove()
assert sys.getrefcount(qk) == 2
@image_comparison(baseline_images=['quiver_animated_test_image'],
extensions=['png'])
def test_quiver_animate():
# Tests fix for #2616
fig, ax = plt.subplots()
Q = draw_quiver(ax, animated=True)
qk = ax.quiverkey(Q, 0.5, 0.92, 2, r'$2 \frac{m}{s}$',
labelpos='W',
fontproperties={'weight': 'bold'})
@image_comparison(baseline_images=['quiver_with_key_test_image'],
extensions=['png'])
def test_quiver_with_key():
fig, ax = plt.subplots()
ax.margins(0.1)
Q = draw_quiver(ax)
qk = ax.quiverkey(Q, 0.5, 0.95, 2,
r'$2\, \mathrm{m}\, \mathrm{s}^{-1}$',
coordinates='figure',
labelpos='W',
fontproperties={'weight': 'bold',
'size': 'large'})
@image_comparison(baseline_images=['quiver_single_test_image'],
extensions=['png'], remove_text=True)
def test_quiver_single():
fig, ax = plt.subplots()
ax.margins(0.1)
ax.quiver([1], [1], [2], [2])
@cleanup
def test_quiver_copy():
fig, ax = plt.subplots()
uv = dict(u=np.array([1.1]), v=np.array([2.0]))
q0 = ax.quiver([1], [1], uv['u'], uv['v'])
uv['v'][0] = 0
assert q0.V[0] == 2.0
@image_comparison(baseline_images=['quiver_key_pivot'],
extensions=['png'], remove_text=True)
def test_quiver_key_pivot():
fig, ax = plt.subplots()
u, v = np.mgrid[0:2*np.pi:10j, 0:2*np.pi:10j]
q = ax.quiver(np.sin(u), np.cos(v))
ax.set_xlim(-2, 11)
ax.set_ylim(-2, 11)
ax.quiverkey(q, 0.5, 1, 1, 'N', labelpos='N')
ax.quiverkey(q, 1, 0.5, 1, 'E', labelpos='E')
ax.quiverkey(q, 0.5, 0, 1, 'S', labelpos='S')
ax.quiverkey(q, 0, 0.5, 1, 'W', labelpos='W')
if __name__ == '__main__':
import nose
nose.runmodule()
| mit |
pprett/statsmodels | statsmodels/examples/ex_generic_mle_t.py | 1 | 10793 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 08:28:04 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats, special
import statsmodels.api as sm
from statsmodels.base.model import GenericLikelihoodModel
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
def maxabs(arr1, arr2):
return np.max(np.abs(arr1 - arr2))
def maxabsrel(arr1, arr2):
return np.max(np.abs(arr2 / arr1 - 1))
class MyT(GenericLikelihoodModel):
    '''Maximum Likelihood Estimation of a linear model with t-distributed errors
    This is an example for generic MLE: the linear predictor gives the location
    of a t distribution whose degrees of freedom and scale are also estimated.
Except for defining the negative log-likelihood method, all
methods and results are generic. Gradients and Hessian
and all resulting statistics are based on numerical
differentiation.
'''
def loglike(self, params):
return -self.nloglikeobs(params).sum(0)
# copied from discretemod.Poisson
def nloglikeobs(self, params):
"""
        Negative loglikelihood of the t-distributed regression model, per observation
Parameters
----------
params : array-like
The parameters of the model.
Returns
-------
        The negative log likelihood of each observation, evaluated at `params`
Notes
--------
        .. math :: -\\ln L_{i}=-\\ln\\Gamma\\left(\\frac{\\nu+1}{2}\\right)+\\ln\\Gamma\\left(\\frac{\\nu}{2}\\right)+\\frac{1}{2}\\ln(\\nu\\pi)+\\frac{\\nu+1}{2}\\ln\\left(1+\\frac{(y_{i}-x_{i}^{\\prime}\\beta)^{2}}{\\nu\\sigma^{2}}\\right)+\\ln\\sigma
"""
#print len(params),
beta = params[:-2]
df = params[-2]
scale = params[-1]
loc = np.dot(self.exog, beta)
endog = self.endog
x = (endog - loc)/scale
#next part is stats.t._logpdf
lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)
lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)
lPx -= np_log(scale) # correction for scale
return -lPx
#Example:
np.random.seed(98765678)
nobs = 1000
rvs = np.random.randn(nobs,5)
data_exog = sm.add_constant(rvs)
xbeta = 0.9 + 0.1*rvs.sum(1)
data_endog = xbeta + 0.1*np.random.standard_t(5, size=nobs)
#print data_endog
modp = MyT(data_endog, data_exog)
modp.start_value = np.ones(data_exog.shape[1]+2)
modp.start_value[-2] = 10
modp.start_params = modp.start_value
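# Added sanity-check sketch (not part of the original example): the negative
# log-likelihood defined in MyT.nloglikeobs should agree with scipy.stats.t.logpdf
# under the same location/scale parametrization; checked here at the start values.
_chk_beta = modp.start_value[:-2]
_chk_df, _chk_scale = modp.start_value[-2], modp.start_value[-1]
_chk_loc = np.dot(data_exog, _chk_beta)
assert np.allclose(-modp.nloglikeobs(modp.start_value),
                   stats.t.logpdf(data_endog, _chk_df, loc=_chk_loc, scale=_chk_scale))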
resp = modp.fit(start_params = modp.start_value)
print(resp.params)
print(resp.bse)
from statsmodels.sandbox.regression.numdiff import approx_fprime1, approx_hess
hb=-approx_hess(modp.start_value, modp.loglike, epsilon=-1e-4)[0]
tmp = modp.loglike(modp.start_value)
print(tmp.shape)
'''
>>> tmp = modp.loglike(modp.start_value)
8
>>> tmp.shape
(100,)
>>> tmp.sum(0)
-24220.877108016182
>>> tmp = modp.nloglikeobs(modp.start_value)
8
>>> tmp.shape
(100, 100)
>>> np.dot(modp.exog, beta).shape
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'beta' is not defined
>>> params = modp.start_value
>>> beta = params[:-2]
>>> beta.shape
(6,)
>>> np.dot(modp.exog, beta).shape
(100,)
>>> modp.endog.shape
(100, 100)
>>> xbeta.shape
(100,)
>>>
'''
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
repr(start_params) array([ 1., 1., 1., 1., 1., 1., 1., 1.])
Optimization terminated successfully.
Current function value: 91.897859
Iterations: 108
Function evaluations: 173
Gradient evaluations: 173
[ 1.58253308e-01 1.73188603e-01 1.77357447e-01 2.06707494e-02
-1.31174789e-01 8.79915580e-01 6.47663840e+03 6.73457641e+02]
[ NaN NaN NaN NaN NaN
28.26906182 NaN NaN]
()
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>> resp.bse
array([ NaN, NaN, NaN, NaN,
NaN, 28.26906182, NaN, NaN])
>>> resp.jac
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'GenericLikelihoodModelResults' object has no attribute 'jac'
>>> resp.bsejac
array([ 45243.35919908, 51997.80776897, 41418.33021984,
42763.46575168, 50101.91631612, 42804.92083525,
3005625.35649203, 13826948.68708931])
>>> resp.bsejhj
array([ 1.51643931, 0.80229636, 0.27720185, 0.4711138 , 0.9028682 ,
0.31673747, 0.00524426, 0.69729368])
>>> resp.covjac
array([[ 2.04696155e+09, 1.46643494e+08, 7.59932781e+06,
-2.39993397e+08, 5.62644255e+08, 2.34300598e+08,
-3.07824799e+09, -1.93425470e+10],
[ 1.46643494e+08, 2.70377201e+09, 1.06005712e+08,
3.76824011e+08, -1.21778986e+08, 5.38612723e+08,
-2.12575784e+10, -1.69503271e+11],
[ 7.59932781e+06, 1.06005712e+08, 1.71547808e+09,
-5.94451158e+07, -1.44586401e+08, -5.41830441e+06,
1.25899515e+10, 1.06372065e+11],
[ -2.39993397e+08, 3.76824011e+08, -5.94451158e+07,
1.82871400e+09, -5.66930891e+08, 3.75061111e+08,
-6.84681772e+09, -7.29993789e+10],
[ 5.62644255e+08, -1.21778986e+08, -1.44586401e+08,
-5.66930891e+08, 2.51020202e+09, -4.67886982e+08,
1.78890380e+10, 1.75428694e+11],
[ 2.34300598e+08, 5.38612723e+08, -5.41830441e+06,
3.75061111e+08, -4.67886982e+08, 1.83226125e+09,
-1.27484996e+10, -1.12550321e+11],
[ -3.07824799e+09, -2.12575784e+10, 1.25899515e+10,
-6.84681772e+09, 1.78890380e+10, -1.27484996e+10,
9.03378378e+12, 2.15188047e+13],
[ -1.93425470e+10, -1.69503271e+11, 1.06372065e+11,
-7.29993789e+10, 1.75428694e+11, -1.12550321e+11,
2.15188047e+13, 1.91184510e+14]])
>>> hb
array([[ 33.68732564, -2.33209221, -13.51255321, -1.60840159,
-13.03920385, -9.3506543 , 4.86239173, -9.30409101],
[ -2.33209221, 3.12512611, -6.08530968, -6.79232244,
3.66804898, 1.26497071, 5.10113409, -2.53482995],
[ -13.51255321, -6.08530968, 31.14883498, -5.01514705,
-10.48819911, -2.62533035, 3.82241581, -12.51046342],
[ -1.60840159, -6.79232244, -5.01514705, 28.40141917,
-8.72489636, -8.82449456, 5.47584023, -18.20500017],
[ -13.03920385, 3.66804898, -10.48819911, -8.72489636,
9.03650914, 3.65206176, 6.55926726, -1.8233635 ],
[ -9.3506543 , 1.26497071, -2.62533035, -8.82449456,
3.65206176, 21.41825348, -1.28610793, 4.28101146],
[ 4.86239173, 5.10113409, 3.82241581, 5.47584023,
6.55926726, -1.28610793, 46.52354448, -32.23861427],
[ -9.30409101, -2.53482995, -12.51046342, -18.20500017,
-1.8233635 , 4.28101146, -32.23861427, 178.61978279]])
>>> np.linalg.eigh(hb)
(array([ -10.50373649, 0.7460258 , 14.73131793, 29.72453087,
36.24103832, 41.98042979, 48.99815223, 190.04303734]), array([[-0.40303259, 0.10181305, 0.18164206, 0.48201456, 0.03916688,
0.00903695, 0.74620692, 0.05853619],
[-0.3201713 , -0.88444855, -0.19867642, 0.02828812, 0.16733946,
-0.21440765, -0.02927317, 0.01176904],
[-0.41847094, 0.00170161, 0.04973298, 0.43276118, -0.55894304,
0.26454728, -0.49745582, 0.07251685],
[-0.3508729 , -0.08302723, 0.25004884, -0.73495077, -0.38936448,
0.20677082, 0.24464779, 0.11448238],
[-0.62065653, 0.44662675, -0.37388565, -0.19453047, 0.29084735,
-0.34151809, -0.19088978, 0.00342713],
[-0.15119802, -0.01099165, 0.84377273, 0.00554863, 0.37332324,
-0.17917015, -0.30371283, -0.03635211],
[ 0.15813581, 0.0293601 , 0.09882271, 0.03515962, -0.48768565,
-0.81960996, 0.05248464, 0.22533642],
[-0.06118044, -0.00549223, 0.03205047, -0.01782649, -0.21128588,
-0.14391393, 0.05973658, -0.96226835]]))
>>> np.linalg.eigh(np.linalg.inv(hb))
(array([-0.09520422, 0.00526197, 0.02040893, 0.02382062, 0.02759303,
0.03364225, 0.06788259, 1.34043621]), array([[-0.40303259, 0.05853619, 0.74620692, -0.00903695, -0.03916688,
0.48201456, 0.18164206, 0.10181305],
[-0.3201713 , 0.01176904, -0.02927317, 0.21440765, -0.16733946,
0.02828812, -0.19867642, -0.88444855],
[-0.41847094, 0.07251685, -0.49745582, -0.26454728, 0.55894304,
0.43276118, 0.04973298, 0.00170161],
[-0.3508729 , 0.11448238, 0.24464779, -0.20677082, 0.38936448,
-0.73495077, 0.25004884, -0.08302723],
[-0.62065653, 0.00342713, -0.19088978, 0.34151809, -0.29084735,
-0.19453047, -0.37388565, 0.44662675],
[-0.15119802, -0.03635211, -0.30371283, 0.17917015, -0.37332324,
0.00554863, 0.84377273, -0.01099165],
[ 0.15813581, 0.22533642, 0.05248464, 0.81960996, 0.48768565,
0.03515962, 0.09882271, 0.0293601 ],
[-0.06118044, -0.96226835, 0.05973658, 0.14391393, 0.21128588,
-0.01782649, 0.03205047, -0.00549223]]))
>>> np.diag(np.linalg.inv(hb))
array([ 0.01991288, 1.0433882 , 0.00516616, 0.02642799, 0.24732871,
0.05281555, 0.02236704, 0.00643486])
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 0.14111302, 1.02146375, 0.07187597, 0.16256686, 0.49732154,
0.22981633, 0.14955616, 0.08021756])
>>> hess = modp.hessian(resp.params)
>>> np.sqrt(np.diag(np.linalg.inv(hess)))
array([ 231.3823423 , 117.79508218, 31.46595143, 53.44753106,
132.4855704 , NaN, 5.47881705, 90.75332693])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-4)[0]
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
array([ 31.93524822, 22.0333515 , NaN, 29.90198792,
38.82615785, NaN, NaN, NaN])
>>> hb=-approx_hess(resp.params, modp.loglike, epsilon=-1e-8)[0]
>>> np.sqrt(np.diag(np.linalg.inv(hb)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 423, in inv
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "C:\Programs\Python25\lib\site-packages\numpy\linalg\linalg.py", line 306, in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
>>> resp.params
array([ 1.58253308e-01, 1.73188603e-01, 1.77357447e-01,
2.06707494e-02, -1.31174789e-01, 8.79915580e-01,
6.47663840e+03, 6.73457641e+02])
>>>
'''
| bsd-3-clause |
jskDr/jamespy_py3 | medic/kdl_cl.py | 1 | 40483 | """
KDL - deep learning for medic
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.signal import convolve2d, fftconvolve
from sklearn import preprocessing, model_selection, metrics
import os
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras import backend as K
from keras import callbacks
import kkeras
def fig2array(fig):
fig.canvas.draw()
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
# ones_255 = np.ones_like( data) * 255
# data = 255 - data
return data
def _gen_cell_r0(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r1(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=5, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
    Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
# print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_r2(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
    Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
for bd_n in range(np.random.randint(max_bd)+1):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
# circle_d["bd2"] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def _gen_cell_db_r0(N=5, rand_pos_cell=False, disp=False):
db_l = []
cell_img_org = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
for i in range(N):
        if disp == 2:
            print(i, end=",")
        elif disp:  # 1, True (not 0 or False)
            print('Iteration:', i)
if rand_pos_cell:
cell_img = gen_cell(bd_on=False, rand_pos_cell=rand_pos_cell)
else:
cell_img = cell_img_org.copy()
cellbd_img = gen_cell(bd_on=True, rand_pos_cell=rand_pos_cell)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
print("The end.")
return db_l
def gen_cell_db(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd = {'mean': 5, 'std': 1}
else:
stat_ext_bd = None
db_l = []
cell_img_org = gen_cell(bd_on=False,
rand_pos_cell=rand_pos_cell,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
for i in range(N):
if disp:
print(i, end=",")
if rand_pos_cell:
cell_img = gen_cell(
bd_on=False, rand_pos_cell=rand_pos_cell,
max_bd=max_bd,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
else:
cell_img = cell_img_org.copy()
        cellbd_img = gen_cell(
            bd_on=True, rand_pos_cell=rand_pos_cell,
            max_bd=max_bd,
            fig=fig, ax=ax,
            stat_ext_bd=stat_ext_bd)
db_l.append(cell_img[:, :, 0]) # No RGB Info
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db(db_l, fname_gz="sheet.gz/cell_db.cvs.gz"):
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["celltype"] = celltype
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
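# Added note: save_cell_db writes the images in long format, one row per pixel,
# with columns ID / celltype / x / y / image; celltype alternates 0 and 1 because
# gen_cell_db appends a bead-free and a bead-attached image on every iteration.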
# ===================================
# Functions for the Center_Cell mode
# - gen_cell_n_beads,
# gen_cell_db_center_cell,
# save_cell_db_center_cell
# ===================================
def gen_cell(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
    Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
return gen_cell_n_beads(bd_on=bd_on,
rand_pos_cell=rand_pos_cell,
r_cell=r_cell, # 0<r_cell<=1
r_bd=r_bd, # 0<r_bd<=1
max_bd=max_bd, # max_bd >= 1
rand_bead_flag=True, # This is onlu changed part.
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd=stat_ext_bd,
bound_flag=bound_flag,
visible=visible,
disp=disp,
fig=fig,
ax=ax)
def gen_cell_n_beads(bd_on=True,
rand_pos_cell=False,
r_cell=0.1, # 0<r_cell<=1
r_bd=0.05, # 0<r_bd<=1
max_bd=3, # max_bd >= 1
rand_bead_flag=False,
# stat_ext_bd=None, # or {'mean':5, 'std':1}
stat_ext_bd={'mean':2, 'std':2},
bound_flag=True,
visible=False,
disp=False,
fig=None,
ax=None):
"""
Generate cell images
The PS bead size is 6 um and silica bead is 5 um.
    Lymphoma cell size varies more widely, but the mean value is around 9-12 um.
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
if fig is None or ax is None:
assert fig is None and ax is None
fig, ax = plt.subplots(figsize=(2, 2))
# set_axis_bgcolor is not working because of plt.axis('off')
# ax.set_axis_bgcolor('red')
close_fig_flag = True
else:
close_fig_flag = False
fig.patch.set_facecolor('black')
circle_d = {}
if rand_pos_cell:
if bound_flag: # Not generate cells in the boundary
B = r_cell + 2.0 * r_bd
pos_cell = B + (1.0 - 2 * B) * np.random.random(2)
else:
pos_cell = np.random.random(2)
else:
pos_cell = np.array([0.5, 0.5])
def rand_pos_bd():
th = np.random.random() * 2 * np.pi
pos_bd = pos_cell + (r_cell + r_bd) * \
np.array((np.cos(th), np.sin(th)))
return pos_bd
#print( pos_cell, pos_bd)
circle_d["cell"] = plt.Circle(pos_cell, r_cell, color='w')
if bd_on:
if rand_bead_flag:
final_max_bd = np.random.randint(max_bd)+1
else:
# Now, the number of total beads attached a cell is fixed (not random).
final_max_bd = max_bd
for bd_n in range(final_max_bd):
circle_d["bd{}".format(bd_n)] = plt.Circle(rand_pos_bd(), r_bd, color='w')
if stat_ext_bd is not None:
#n_ext_bd = np.max((0, int(np.random.randn()*stat_ext_bd['std'] + stat_ext_bd['mean'])))
n_ext_bd = np.random.randint(stat_ext_bd['mean']+1)
for ext_bd_n in range(n_ext_bd):
ext_bd_pos = np.random.rand(2)
circle_d["ext_bd{}".format(ext_bd_n)] = plt.Circle(ext_bd_pos, r_bd, color='w')
for k in circle_d.keys():
ax.add_artist(circle_d[k])
plt.axis('off')
data_a = fig2array(fig)
if disp:
print("Image array shape = ", data_a.shape)
if visible:
plt.show()
else:
if close_fig_flag:
plt.close()
else:
plt.cla()
return data_a
def gen_cell_db_center_cell(N=5, rand_pos_cell=False,
extra_bead_on=True,
max_bd=3,
disp=False):
"""
db_l = gen_cell_db(N=5, rand_pos_cell=False, extra_bead_on=True, disp=False)
Generate cell_db
Inputs
======
max_bd, int, default=3
The number of the maximum beads attached to a cell.
"""
fig, ax = plt.subplots(figsize=(2, 2))
# ax.set_axis_bgcolor('red')
if extra_bead_on:
stat_ext_bd={'mean':5, 'std':1}
else:
stat_ext_bd=None
db_l = []
for i in range(N):
if disp:
print(i, end=",")
        # n_beads cycles from 0 to max_bd-1, so every bead count is
        # generated in turn by gen_cell_n_beads below.
n_beads = i % max_bd
cellbd_img = gen_cell_n_beads(
bd_on=True,
rand_pos_cell=rand_pos_cell,
max_bd=n_beads, # max_bd is repeated from 0 to max_bd
rand_bead_flag=False,
fig=fig, ax=ax,
stat_ext_bd=stat_ext_bd)
db_l.append(cellbd_img[:, :, 0]) # No RGB Info
plt.close(fig)
print("The end.")
return db_l
def save_cell_db_center_cell(db_l, max_bd, fname_gz="sheet.gz/cell_db.cvs.gz"):
"""
    Each image includes a cell at the center location, and
    the number of beads per cell is equally distributed
    cyclically. That is, 0 beads, 1 bead, ..., max_bd-1 beads are repeated
    across the images.
"""
df_l = []
celltype = 0
for i, db in enumerate(db_l):
df_i = pd.DataFrame()
df_i["ID"] = [i] * np.prod(db.shape)
df_i["n_beads"] = [i % max_bd] * np.prod(db.shape)
df_i["x"] = np.repeat(np.arange(db.shape[0]), db.shape[1])
df_i["y"] = list(range(db.shape[1])) * db.shape[0]
df_i["image"] = db.reshape(-1)
celltype ^= 1
df_l.append(df_i)
cell_df = pd.concat(df_l, ignore_index=True)
cell_df.to_csv(fname_gz, index=False, compression='gzip')
return cell_df
def gen_save_cell_db(N=5, fname_gz="sheet.gz/cell_db.cvs.gz",
extra_bead_on=True, rand_pos_cell=False,
max_bd=3,
classification_mode="Cancer_Normal_Cell",
disp=False):
"""
- Image show without pausing is needed. (Oct 31, 2016)
Parameters
==========
rand_pos_cell, Default=False
If it is True, the position of cell is varied
Otherwise, the position is fixed to be the center (0,0).
max_bd, int, default=3
The number of the maximum beads attached to a cell.
    classification_mode, string, default="Cancer_Normal_Cell"
        If it is "Cancer_Normal_Cell", this function classifies cancer or normal.
        If it is "Center_Cell", this function classifies the number of beads in each cell.
        In this case, the number of beads in cells is equally distributed
        from 0 to max_bd-1. For example, if N=100 & max_bd=4, 0-beads,
        1-beads, 2-beads and 3-beads cell images are repeated 25 times.
"""
def save(save_fn, db_l, max_bd=None, fname_gz=None):
fname_gz_fold, fname_gz_file = os.path.split(fname_gz)
os.makedirs(fname_gz_fold, exist_ok=True)
if max_bd is None:
cell_df = save_fn(db_l, fname_gz=fname_gz)
else:
cell_df = save_fn(db_l, max_bd, fname_gz=fname_gz)
return cell_df
if classification_mode == "Cancer_Normal_Cell":
db_l = gen_cell_db(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
# classification_mode=classification_mode,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db, db_l, fname_gz=fname_gz)
elif classification_mode == "Center_Cell":
        assert int(N % max_bd) == 0, "N % max_bd should be zero in the Center_Cell mode"
db_l = gen_cell_db_center_cell(N, rand_pos_cell=rand_pos_cell,
extra_bead_on=extra_bead_on,
max_bd=max_bd,
disp=disp)
if disp:
print("Saving...")
cell_df = save(save_cell_db_center_cell, db_l, max_bd,
fname_gz=fname_gz)
else:
raise ValueError("classification_mode = {} is not supported.".format(classification_mode))
return cell_df
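# Minimal usage sketch (added comment; the file name and sizes are only examples):
# cell_df = gen_save_cell_db(N=100, fname_gz="sheet.gz/cell_db100.cvs.gz",
#                            rand_pos_cell=True, max_bd=4,
#                            classification_mode="Center_Cell", disp=True)
# In "Center_Cell" mode N must be a multiple of max_bd, so the 0-, 1-, 2- and
# 3-bead images above would each be generated 25 times.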
class obj:
def __init__(self, r, L=144):
"""
The PS bead size is 6 um and silica bead is 5 um.
        Lymphoma cell size varies more widely,
        but the mean value is around 9-12 um.
"""
# Initial values
self.Lx, self.Ly = L, L
self.downsamples = 4
self.d_um = 2.2 / self.downsamples
# Input and generated values
self.r = r
self.r_pixels_x = self.r * self.Lx
self.r_pixels_y = self.r * self.Ly
self.r_x_um = self.r_pixels_x * self.d_um
self.r_y_um = self.r_pixels_y * self.d_um
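# Worked example (added note): with the defaults L=144 and d_um = 2.2/4 = 0.55 um per
# pixel, obj(0.1) gives r_pixels_x = 0.1*144 = 14.4 pixels and r_x_um = 14.4*0.55 = 7.92 um.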
def get_h2d(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
    2D Fresnel Diffraction Formulation
Input
=====
x, np.array
x position
z, np.array
hight
l, float
lambda
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
    x = np.dot(np.ones((ny, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
    y = y_vec * np.ones((1, nx))
#k = 2.0 * np.pi / l
return np.exp(1j * k * z) / (1j * l * z) * np.exp((1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
def get_h2d_inv(nx, ny, l=700, z=0.5, dx=0.8, dy=0.8):
"""
    2D Fresnel Diffraction Formulation
Input
=====
x, np.array
x position
z, np.array
hight
l, float
lambda
"""
k = 2.0 * np.pi / l
x_vec = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx
    x = np.dot(np.ones((ny, 1)), x_vec)
y_vec = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy
    y = y_vec * np.ones((1, nx))
#k = 2.0 * np.pi / l
return np.exp(-1j * k * z) / (1j * l * z) * np.exp((-1j * k / (2 * z)) *
(np.power(x, 2) + np.power(y, 2)))
# Fresnel
def get_h(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
    2D Fresnel Diffraction Formulation
Input
=====
x, np.array
x position
z, np.array
hight
l, float
lambda
"The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies in a larger variance, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def get_h_inv(ny, nx, z_mm=0.5, dx_um=2.2, dy_um=2.2, l_nm=405):
"""
    2D Fresnel diffraction kernel with conjugate (negative) phase.
    Input
    =====
    ny, nx, int
        grid size along y and x
    z_mm, float
        propagation distance in mm
    dx_um, dy_um, float
        sampling pitch in um
    l_nm, float
        wavelength (lambda) in nm
"The PS bead size is 6 um and silica bead is 5 um.
Lymphoma cell size varies in a larger variance, but the mean value is around 9-12 um."
"""
# nano-meter to micro-meter transform (nm -> um)
l_um = l_nm / 1000
z_um = z_mm * 1000
x_vec_um = (np.arange(1, nx+1).reshape(1, -1) - nx/2)*dx_um
x_um = np.dot(np.ones((ny, 1)), x_vec_um)
y_vec_um = (np.arange(1, ny+1).reshape(-1, 1) - ny/2)*dy_um
y_um = y_vec_um * np.ones((1, nx))
return np.exp((-1j * np.pi) / (l_um * z_um) *
(np.power(x_um, 2) + np.power(y_um, 2)))
def fd_conv(Img_xy, h2d, mode ='same'):
#return convolve2d(Img_xy, h2d, mode=mode)
return fftconvolve(Img_xy, h2d, mode=mode)
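# --- Added illustration (hedged): build a Fresnel kernel with get_h() and
# propagate a toy 144x144 image with fd_conv(). The square test object and the
# parameter values are assumptions used only to make the sketch self-contained.
def _example_fresnel_propagation():
    Lx = Ly = 144
    img = np.zeros((Ly, Lx))
    img[60:84, 60:84] = 1.0  # simple square object
    h = get_h(Ly, Lx, z_mm=0.5, dx_um=2.2 / 4, dy_um=2.2 / 4, l_nm=405)
    field = fd_conv(img, h)         # complex diffracted field, same-size output
    intensity = np.abs(field) ** 2  # what a camera would record
    return intensity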
def cell_fd_info(cell_df):
Lx = cell_df['x'].max() + 1
Ly = cell_df['y'].max() + 1
Limg = cell_df['ID'].max() + 1
#print( Lx, Ly, Limg)
return Limg, Lx, Ly
def cell_fd_conv(cell_df, h144=None):
Limg, Lx, Ly = cell_fd_info(cell_df)
if h144 is None:
h144 = get_h2d(Lx, Ly, l=405, z=0.5, dx=2.2/4, dy=2.2/4)
cell_img_fd_l = []
for l in range(Limg):
cell_img = cell_df[cell_df["ID"] == l]["image"].values.reshape(Lx, Ly)
#cell_img_fd = fd_conv(cell_img, h144)
cell_img_fd = fftconvolve(cell_img, h144, mode='same')
cell_img_fd_l.append(cell_img_fd)
cell_img_fd_a = np.array(cell_img_fd_l)
#print( cell_img_fd_a.shape)
return cell_img_fd_a
def cell_fd_extention(fname_org='sheet.gz/cell_db.cvs.gz', camera_bit_resolution=14):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
def cell_fd_ext_save(fname_org='sheet.gz/cell_db100.cvs.gz',
fname_ext='sheet.gz/cell_fd_db100.cvs.gz'):
cell_df_ext = cell_fd_extention(fname_org)
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
class CELL_FD_EXT():
def __init__(self, fname_org, h2d=None, h2d_inv=None):
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
if h2d is None:
h2d = get_h(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
if h2d_inv is None:
h2d_inv = get_h_inv(Ly, Lx, z_mm=0.5, dx_um=2.2/4, dy_um=2.2/4, l_nm=405)
self.fname_org = fname_org
self.h2d = h2d
self.h2d_inv = h2d_inv
def save(self):
fname_org = self.fname_org
fname_ext = fname_org[:-7] + '_fd' + fname_org[-7:]
print('fname_ext is', fname_ext)
cell_df_ext = self.extention()
# Save data
cell_df_ext.to_csv(fname_ext, index=False, compression='gzip')
return cell_df_ext
def extention(self, camera_bit_resolution=14):
fname_org = self.fname_org
h2d = self.h2d
cell_df = pd.read_csv(fname_org)
Limg, Lx, Ly = cell_fd_info(cell_df)
cell_df_ext = cell_df.copy()
# Fresnel diffraction
cell_img_fd_a = cell_fd_conv(cell_df, h2d)
cell_df_ext['freznel image'] = cell_img_fd_a.reshape(-1)
# max_v, min_v = np.max(cell_df["image"]), np.min(cell_df["image"])
cell_img_fd_a_2d = cell_img_fd_a.reshape(Limg, -1)
cell_img_fd_a_2d_scale = preprocessing.minmax_scale(
np.abs(cell_img_fd_a_2d)) * (2**camera_bit_resolution)
cell_img_fd_a_2d_scale_200x144x144 = cell_img_fd_a_2d_scale.reshape(
Limg, Lx, Ly).astype(int)
cell_df_ext[
'mag freznel image'] = cell_img_fd_a_2d_scale_200x144x144.reshape(-1)
return cell_df_ext
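# --- Added illustration (hedged): CELL_FD_EXT reads a cell database CSV,
# appends the Fresnel-diffracted images, and writes "<name>_fd<ext>". The input
# file name below is an assumption for the example only.
def _example_cell_fd_ext():
    ext = CELL_FD_EXT('sheet.gz/cell_db100.cvs.gz')
    cell_df_ext = ext.save()  # writes sheet.gz/cell_db100_fd.cvs.gz
    return cell_df_ext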
#Deep Learning
def _run_dl_mgh_params_r0(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
#Deep Learning
def run_dl_mgh_params(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (50, 50)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
    print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
def run_dl_mgh_params_2cl(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
#model.add(Dropout(0.25))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
#model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
    print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
def run_dl_mgh_params_2cl_do(X, y, Lx, Ly, nb_epoch=5000,
batch_size = 128,
nb_classes = 2):
"""
    Dropout is also included after batch normalization to protect
    against overfitting.
"""
# input image dimensions
img_rows, img_cols = Lx, Ly
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (10, 10)
# convolution kernel size
kernel_size = (20, 20)
# the data, shuffled and split between train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y, test_size=0.2, random_state=0)
if K.image_dim_ordering() == 'th':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
border_mode='valid',
input_shape=input_shape))
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.25))
model.add(Convolution2D(5, 5, 5, border_mode='valid'))
model.add(BatchNormalization())
model.add(Activation('tanh'))
model.add(MaxPooling2D(pool_size=(5,5)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(4))
model.add(BatchNormalization())
model.add(Activation('tanh'))
#model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# earlyStopping=callbacks.EarlyStopping(monitor='val_loss', patience=3, verbose=1, mode='auto')
history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test)) #, callbacks=[earlyStopping])
score = model.evaluate(X_test, Y_test, verbose=0)
Y_test_pred = model.predict(X_test, verbose=0)
    print('Confusion matrix')
y_test_pred = np_utils.categorical_probas_to_classes(Y_test_pred)
print(metrics.confusion_matrix(y_test, y_test_pred))
print('Test score:', score[0])
print('Test accuracy:', score[1])
kkeras.plot_acc( history)
plt.show()
kkeras.plot_loss( history)
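# --- Added illustration (hedged): the run_dl_mgh_params* trainers expect X with
# one flattened Lx*Ly image per row and integer class labels y. The random data
# below is a stand-in so the call signature is clear; real use would load the
# (mag) Fresnel images from the saved cell database instead.
def _example_run_dl(nb_epoch=5, Lx=144, Ly=144, n_samples=64):
    X = np.random.rand(n_samples, Lx * Ly) * 255.0
    y = np.random.randint(0, 2, size=n_samples)
    run_dl_mgh_params_2cl_do(X, y, Lx, Ly, nb_epoch=nb_epoch, batch_size=16)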
"""
Fresnel Diffraction with a new approach
"""
def f(x_um, y_um, z_mm=0.5, l_nm=405):
return np.exp(1j * np.pi * (np.power(x_um, 2) + np.power(y_um,2)) / (l_nm * z_mm))
def cimshow(f_impulse):
plt.figure(figsize=(7,5))
plt.subplot(2,2,1)
plt.imshow(np.real(f_impulse))
plt.colorbar()
plt.title('Re{}')
plt.subplot(2,2,2)
plt.imshow(np.imag(f_impulse))
plt.colorbar()
plt.title('Img{}')
plt.subplot(2,2,2+1)
plt.imshow(np.abs(f_impulse))
plt.colorbar()
plt.title('Magnitude')
plt.subplot(2,2,2+2)
plt.imshow(np.angle(f_impulse))
plt.colorbar()
plt.title('Phase')
def xy(MAX_x_um = 55, pixel_um=2.2, oversample_rate=4):
N = int(MAX_x_um / (pixel_um / oversample_rate))
x = np.dot(np.ones((N,1)), np.linspace(-MAX_x_um,MAX_x_um,N).reshape(1,-1))
y = np.dot(np.linspace(-MAX_x_um,MAX_x_um,N).reshape(-1,1), np.ones((1,N)))
return x, y
def u(x, y, alpha):
out = np.zeros_like(x)
out[(y>=-alpha/2)&(y<=alpha/2)&(x>=-alpha/2)&(x<=alpha/2)] = 1.0
return out
def u_circle(x,y,radius):
xy2 = np.power(x,2) + np.power(y,2)
    # Since x is already a 2-D grid matrix, the output shape is copied from x.
    # If x were a vector, the output shape would need to be redefined as 2-D.
out = np.zeros_like(x)
out[xy2<=np.power(radius,2)] = 1.0
return out
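# --- Added illustration (hedged): sample a square grid with xy(), place a
# circular aperture with u_circle(), and multiply by the quadratic Fresnel
# phase f(). The radius and propagation distance are arbitrary example values.
def _example_aperture_field():
    x_um, y_um = xy(MAX_x_um=55, pixel_um=2.2, oversample_rate=4)
    aperture = u_circle(x_um, y_um, radius=5.0)
    field = aperture * f(x_um, y_um, z_mm=0.5, l_nm=405)
    return field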
# Code for generation H in frequency domain: H <--> h
# Gbp(n,m) = exp(1i*k*Dz*sqrt(1-lambda^2*fx(n,m)^2-lambda^2*fy(n,m)^2))
class GenG():
def upsampling(self, Pow2factor, dx1):
"""
Utility codes
"""
dx2 = dx1 / (2**Pow2factor)
return dx2
def __init__(self, NxNy=(144, 144), Dz_mm=0.5, delta_um = 2.2, UpsampleFactor=2, lambda_nm=405):
"""
oversample=2^UpsampleFactor
"""
delta_m = delta_um * 1e-6
delta2_m = self.upsampling(UpsampleFactor, delta_m)
Nx, Ny = NxNy
dfx = 1/(Nx*delta2_m)
dfy = 1/(Ny*delta2_m)
x = np.arange(-Ny/2, Ny/2)*dfy
y = np.arange(-Nx/2, Nx/2)*dfx
self.xv, self.yv = np.meshgrid(x, y)
self.lambda_m = lambda_nm * 1e-9
self.k_rad = 2*np.pi/self.lambda_m
self.Dz_m = Dz_mm * 1e-3
def bp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2)))
def fp(self):
x, y = self.xv, self.yv
l = self.lambda_m
k = self.k_rad
Dz = self.Dz_m
return np.exp(-1j * k * Dz * np.sqrt(1-np.power(l*x,2)-np.power(l*y,2))) | mit |
linuxmcu/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
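# Added illustration (hedged): a typical driver registers icosahedron triangles
# (and optionally some grid sections) with the helpers above and then renders
# them. Wrapped in a function so importing this module stays side-effect free.
def _example_usage():
    polygons(ico.triangles)  # draw every top-level icosahedron triangle
    sections(range(4))       # also highlight the sub-triangles of triangle 0
    show(subtriangles=True)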
| gpl-3.0 |
davidzchen/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 1 | 53018 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
  In TF 2.0, tf.data is the preferred API for users to feed in data. In order
  to simplify the training code path, all input data objects will be
  converted to `tf.data.Dataset` if possible.
Note that since this class is mainly targeted for TF 2.0, it might have a lot
of assumptions under the hood, eg eager context by default, distribution
strategy, etc. In the meantime, some legacy feature support might be dropped,
eg, Iterator from dataset API in v1, etc.
The sample usage of this class is like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
    Structure-wise, x and y can be a single object, a list of objects if there
    are multiple inputs/outputs, or a dictionary of objects when the
    inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
The caller must make sure to call `can_handle()` first before invoking this
    method. Providing an unsupported data type will result in unexpected
    behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
DataAdapter might choose to ignore any keyword argument if it doesn't
      use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
    Note that the returned dataset does not repeat across epochs, so the caller
    might need to create a new iterator for the same dataset at the beginning
    of each epoch. This behavior might change in the future.
Returns:
      A tf.data.Dataset. The caller might use the dataset in different
      contexts, e.g. iter(dataset) in eager mode to get the value directly, or in graph
mode, provide the iterator tensor to Keras model function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
    For certain types of data input, the number of batches is known, e.g. for
    Numpy data the size is the same as (number_of_elements / batch_size).
    Whereas for a dataset or python generator, the size is unknown since it may
    or may not have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
    For certain types of data input, the batch size is known, and even
    required, like numpy arrays. Whereas for a dataset, the batch size is
    unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= (self.batch_size() - self.partial_batch_size())
return total_sample
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs)).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
distribute_options.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
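# A minimal, hedged usage sketch (added for illustration; not part of the
# original module): it shows how the adapter above turns in-memory NumPy data
# into a batched `tf.data.Dataset`. The shapes and batch size are arbitrary
# example values.
def _example_tensor_like_adapter():
  x = np.random.rand(100, 10).astype("float32")
  y = np.random.randint(0, 2, size=(100, 1))
  adapter = TensorLikeDataAdapter(x, y, batch_size=32, epochs=1, shuffle=True)
  # 100 samples with batch_size 32 -> 4 batches, the last one partial (4).
  return adapter.get_dataset(), adapter.get_size(), adapter.partial_batch_size()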
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
`__get_item__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
      # to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
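# A hedged sketch (added for illustration): the kind of object this adapter
# targets is anything exposing `__getitem__`, `__len__`, `shape`, and `dtype`
# without being a Tensor, NumPy array, or composite tensor. It is backed by a
# NumPy array here purely for brevity; a realistic use case would read lazily
# from disk.
class _ExampleArrayLike(object):
  """Array-like wrapper that slices its backing data on demand."""
  def __init__(self, data):
    self._data = data
    self.shape = data.shape
    self.dtype = data.dtype
  def __len__(self):
    return len(self._data)
  def __getitem__(self, key):
    return self._data[key]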
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset inherits from CompositeTensor but shouldn't be handled here.
if (tf_utils.is_extension_type(v) and
not isinstance(v, dataset_ops.DatasetV2)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)):
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
    # Note that the dataset instance is immutable; it's fine to reuse the user
# provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
# Note that dataset API takes a callable that creates a generator object,
# rather than generator itself, which is why we define a function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
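# A hedged usage sketch (added for illustration): a plain Python generator
# yielding `(x, y)` batches is all this adapter needs; the first batch is
# peeked to infer dtypes and (unknown) batch shapes. The batch contents are
# arbitrary example values.
def _example_generator_adapter():
  def gen():
    while True:
      yield (np.random.rand(8, 4).astype("float32"),
             np.random.randint(0, 2, (8, 1)))
  adapter = GeneratorDataAdapter(gen())
  return adapter.get_dataset()  # infinite dataset; the caller supplies `steps`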
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super(KerasSequenceAdapter, self).__init__(
x,
shuffle=False, # Shuffle is handed in the _make_callable override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
self._enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
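# A hedged sketch (added for illustration) of a `keras.utils.Sequence` that the
# adapter above consumes: `__len__` reports the number of batches and
# `__getitem__` returns one `(x, y)` batch. The random data exists only for
# this example.
class _ExampleSequence(data_utils.Sequence):
  def __init__(self, batches=10, batch_size=8):
    self._x = np.random.rand(batches * batch_size, 4).astype("float32")
    self._y = np.random.randint(0, 2, (batches * batch_size, 1))
    self._batch_size = batch_size
  def __len__(self):
    return len(self._x) // self._batch_size
  def __getitem__(self, idx):
    s = slice(idx * self._batch_size, (idx + 1) * self._batch_size)
    return self._x[s], self._y[s]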
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter,
GeneratorDataAdapter, KerasSequenceAdapter, CompositeTensorDataAdapter,
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
return adapter_cls[0]
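# Hedged illustration of the selection helper above: each supported input type
# resolves to exactly one adapter class; plain NumPy inputs resolve to
# `TensorLikeDataAdapter`.
def _example_select_data_adapter():
  x = np.random.rand(10, 2)
  y = np.random.rand(10, 1)
  return select_data_adapter(x, y)  # -> TensorLikeDataAdapter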
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
(2) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_numpy_and_scipy(x):
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
return nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
  # Util method to check if the input is None or an empty list.
  # The python "not" check would raise an error like the one below if the
  # input is a numpy array:
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
.format(target_str, mode_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
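# Hedged illustration of the broadcasting rule above: a single string mode is
# expanded to match the structure of the targets.
def _example_broadcast_sample_weight_modes():
  modes_dict = broadcast_sample_weight_modes({"a": [1.0], "b": [2.0]},
                                             "temporal")
  assert modes_dict == {"a": "temporal", "b": "temporal"}
  modes_list = broadcast_sample_weight_modes([[1.0], [2.0]], "temporal")
  assert modes_list == ["temporal", "temporal"]
  return modes_dict, modes_list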
class DataHandler(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None):
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=ds_context.get_strategy(),
model=model)
strategy = ds_context.get_strategy()
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
if not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
context.async_wait()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError("When passing an infinitely repeating dataset, you "
"must specify how many steps to draw.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Arguments:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = list(sorted(class_weight.keys()))
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single output.")
if y.shape.rank > 2:
raise ValueError("`class_weight` not supported for "
"3+ dimensional targets.")
y_classes = smart_cond.smart_cond(
y.shape.rank == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
cw = array_ops.gather_v2(class_weight_tensor, y_classes)
if sw is not None:
cw = math_ops.cast(cw, sw.dtype)
sw, cw = expand_1d((sw, cw))
# `class_weight` and `sample_weight` are multiplicative.
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
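# Illustrative usage sketch (assumed names): mapping the returned function over
# a `tf.data.Dataset` of `(x, y)` batches turns each element into
# `(x, y, sample_weight)`, with weights gathered from the class ids in `y`.
#
#   class_weight_fn = _make_class_weight_map_fn({0: 1.0, 1: 3.0})
#   dataset = dataset.map(class_weight_fn)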
def expand_1d(data):
"""Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""
def _expand_single_1d_tensor(t):
# Leaves `CompositeTensor`s as-is.
if (isinstance(t, ops.Tensor) and
isinstance(t.shape, tensor_shape.TensorShape) and t.shape.rank == 1):
return array_ops.expand_dims_v2(t, axis=-1)
return t
return nest.map_structure(_expand_single_1d_tensor, data)
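# Illustrative example: a rank-1 tensor of shape `(batch,)` gains a trailing
# axis, while higher-rank tensors and `CompositeTensor`s pass through unchanged.
#
#   expand_1d(tf.constant([0., 1., 1.]))   # -> shape (3, 1)
#   expand_1d(tf.ones((3, 2)))             # -> unchanged, shape (3, 2)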
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Arguments:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays
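# Illustrative example (assumed shapes): with 10 samples and
# `validation_split=0.2`, `split_at = floor(10 * 0.8) = 8`, so the first 8
# samples form the training split and the last 2 the validation split.
#
#   x, y = np.arange(10).reshape(10, 1), np.arange(10)
#   (x_train, y_train), (x_val, y_val) = train_validation_split(
#       (x, y), validation_split=0.2)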
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Arguments:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not
provided.
"""
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}").format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Arguments:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
# data in an unnecessary tuple.
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def single_batch_iterator(strategy,
x,
y=None,
sample_weight=None,
class_weight=None):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = dataset_ops.DatasetV2.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label, ", ".join(str(i.shape[0]) for i in nest.flatten(single_data)))
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)
return sparse_tensor.SparseTensor(indices, data, shape)
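# Illustrative usage sketch (assumes SciPy is available): any SciPy sparse
# format works because the matrix is first converted to COO, and floating data
# is cast to `backend.floatx()`.
#
#   from scipy import sparse as scipy_sparse
#   st = _scipy_sparse_to_sparse_tensor(
#       scipy_sparse.random(4, 3, density=0.5, format="csr"))
#   # st is a SparseTensor with dense_shape (4, 3)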
def _is_distributed_dataset(ds):
# TODO(b/151165986): Use public APIs.
return isinstance(
ds,
(input_lib.DistributedDataset, input_lib.DistributedDatasetsFromFunction))
| apache-2.0 |
nitishsrivastava/deepnet | deepnet/layer.py | 10 | 9558 | """Implements a layer of neurons."""
from parameter import *
import matplotlib.pyplot as plt
plt.ion()
class Layer(Parameter):
def __init__(self, proto, t_op=None, tied_to=None):
super(Layer, self).__init__()
self.tied_to = tied_to
if proto.tied:
tied_to.num_shares += 1
proto = util.LoadMissing(proto, tied_to.proto)
self.proto = proto
self.state = None
self.params = {}
self.hyperparams = proto.hyperparams
self.incoming_edge = []
self.outgoing_edge = []
self.outgoing_neighbour = []
self.incoming_neighbour = []
self.use_suff_stats = False
self.fast_dropout_partner = None
if t_op:
self.batchsize = t_op.batchsize
self.use_suff_stats = t_op.optimizer == deepnet_pb2.Operation.PCD \
or t_op.optimizer == deepnet_pb2.Operation.CD
else:
self.batchsize = 0
self.name = proto.name
self.dimensions = proto.dimensions
self.numlabels = proto.numlabels
self.activation = proto.hyperparams.activation
self.is_input = proto.is_input
self.is_output = proto.is_output
self.loss_function = proto.loss_function
self.loss_weight = proto.loss_weight
self.train_data_handler = None
self.validation_data_handler = None
self.test_data_handler = None
self.tied_to = None
self.data_tied_to = None
self.data = None
self.deriv = None
self.prefix = proto.prefix
self.marker = 0
self.fig = visualize.GetFigId()
self.tiny = 1e-10
self.replicated_neighbour = None
self.is_initialized = proto.is_initialized
self.t_op = t_op
self.learn_precision = False
self.sample_input = self.hyperparams.sample_input
self.LoadParams(proto, t_op=t_op, tied_to=tied_to)
if self.batchsize > 0:
self.AllocateMemory(self.batchsize)
def LoadParams(self, proto, **kwargs):
assert proto
for param in proto.param:
if not param.dimensions:
param.dimensions.extend([proto.numlabels * proto.dimensions, 1])
elif len(param.dimensions) == 1:
param.dimensions.append(1)
super(Layer, self).LoadParams(proto, **kwargs)
def LoadPretrained(self, param):
node_name = param.pretrained_model_node1
if node_name == '':
node_name = self.proto.name
mat = None
for pretrained_model in param.pretrained_model:
model_file = os.path.join(self.prefix, pretrained_model)
ext = os.path.splitext(pretrained_model)[1]
if ext == '.npz':
npzfile = np.load(model_file)
if param.name == 'bias':
this_mat = np.nan_to_num(npzfile['mean'] / npzfile['std'])
elif param.name == 'precision':
this_mat = np.nan_to_num(1. / npzfile['std'])
elif ext == '.npy':
this_mat = np.load(model_file)
else:
model = util.ReadModel(model_file)
# Find the relevant node in the model.
node = next(n for n in model.layer if n.name == node_name)
# Find the relevant parameter in the node.
pretrained_param = next(p for p in node.param if p.name == param.name)
assert pretrained_param.mat != '',\
'Pretrained param %s in layer %s of model %s is empty!!' % (
pretrained_param.name, node.name, pretrained_model)
this_mat = util.ParameterAsNumpy(pretrained_param)
if len(this_mat.shape) == 1:
this_mat = this_mat.reshape(-1, 1)
if mat is None:
mat = this_mat
else:
mat += this_mat
return mat / len(param.pretrained_model)
def SetData(self, data):
self.data = data
def AddIncomingEdge(self, edge):
if edge not in self.incoming_edge:
self.incoming_edge.append(edge)
if self == edge.node1:
neighbour = edge.node2
else:
neighbour = edge.node1
self.incoming_neighbour.append(neighbour)
if neighbour.proto.replicate_bias and neighbour.activation == deepnet_pb2.Hyperparams.REPLICATED_SOFTMAX:
self.replicated_neighbour = neighbour
def AddOutgoingEdge(self, edge):
if edge not in self.outgoing_edge:
self.outgoing_edge.append(edge)
if self == edge.node1:
self.outgoing_neighbour.append(edge.node2)
else:
self.outgoing_neighbour.append(edge.node1)
def PrintNeighbours(self):
for n in self.incoming_neighbour:
print "Incoming edge from %s" % n.name
for n in self.outgoing_neighbour:
print "Outgoing edge to %s" % n.name
def ResetState(self, rand=False):
if rand:
self.state.fill_with_randn()
self.ApplyActivation()
else:
self.state.assign(0)
def GetData(self):
self.state.assign(self.data)
def GetSparsityGradient(self):
h = self.hyperparams
damping = h.sparsity_damping
target = h.sparsity_target
cost = h.sparsity_cost
# Update \hat{\rho}.
self.means.mult(damping)
self.means.add_sums(self.state, axis=1, mult=(1-damping)/self.batchsize)
# Compute gradient.
self.means.subtract(target, target=self.sparsity_gradient)
div = self.GetSparsityDivisor()
self.sparsity_gradient.divide(div)
self.sparsity_gradient.mult(cost)
# Return gradient.
return self.sparsity_gradient
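# NumPy sketch of the update above (illustrative only -- the real code runs on
# the GPU through cudamat): an exponential moving average of the mean unit
# activation is penalised for drifting away from the sparsity target.
#
#   rho_hat = damping * rho_hat + (1 - damping) * state.mean(axis=1, keepdims=True)
#   grad = cost * (rho_hat - target) / divisor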
def AllocateMemory(self, batchsize):
self.AllocateBatchsizeDependentMemory(batchsize)
dimensions = self.dimensions
numlabels = self.numlabels
numdims = dimensions * numlabels
self.dimsize = cm.CUDAMatrix(np.zeros((numdims, 1)))
if self.hyperparams.sparsity:
tgt = self.hyperparams.sparsity_target
self.means = cm.CUDAMatrix(tgt + np.zeros((numdims, 1)))
self.sparsity_gradient = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.means_temp2 = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.gradient = cm.CUDAMatrix(np.zeros((numdims, 1)))
self.gradient_history = cm.CUDAMatrix(np.zeros((numdims, 1)))
def AllocateBatchsizeDependentMemory(self, batchsize):
if self.data:
self.data.free_device_memory()
if self.deriv:
self.deriv.free_device_memory()
self.batchsize = batchsize
dimensions = self.dimensions
numlabels = self.numlabels
numdims = dimensions * numlabels
self.statesize = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.batchsize_temp = cm.CUDAMatrix(np.zeros((1, batchsize)))
self.state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.deriv = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
if self.t_op:
if self.t_op.optimizer == deepnet_pb2.Operation.PCD:
self.pos_state = self.state
self.pos_sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.neg_state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.neg_sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.sample = self.pos_sample
self.suff_stats = cm.empty((numdims, 1))
elif self.t_op.optimizer == deepnet_pb2.Operation.CD:
self.sample = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
self.suff_stats = cm.empty((numdims, 1))
else:
self.state = cm.CUDAMatrix(np.zeros((numdims, batchsize)))
if self.is_input or self.is_initialized or self.is_output:
self.data = cm.CUDAMatrix(np.zeros((dimensions, batchsize)))
if self.hyperparams.dropout:
self.mask = cm.CUDAMatrix(np.zeros(self.state.shape))
def CollectSufficientStatistics(self, neg=False):
"""Collect sufficient statistics for this layer."""
h = self.hyperparams
if not neg:
self.state.sum(axis=1, target=self.suff_stats)
if h.sparsity:
sparsity_gradient = self.GetSparsityGradient()
self.suff_stats.add_mult(sparsity_gradient, -self.batchsize)
else:
self.suff_stats.add_sums(self.state, axis=1, mult=-1.0)
if not neg and h.sparsity:
return self.means.sum()/self.means.shape[0]
def Show(self, train=False):
"""Displays useful statistics about the model."""
if not self.proto.hyperparams.enable_display:
return
f = 1
if self.hyperparams.dropout and not train:
f = 1 / (1 - self.hyperparams.dropout_prob)
if self.is_input:
visualize.display_hidden(self.data.asarray(), self.fig, title=self.name)
#visualize.display_w(self.neg_sample.asarray(), 28, 10, self.state.shape[1]/10, self.fig, title=self.name, vmax=1, vmin=0)
#visualize.show_hist(self.params['bias'].asarray(), self.fig)
else:
visualize.display_hidden(f*self.state.asarray(), self.fig, title=self.name)
#visualize.show_hist(self.params['bias'].asarray(), self.fig)
"""
plt.figure(self.fig)
plt.clf()
plt.subplot(1, 3, 1)
plt.title('pos_probabilities')
plt.imshow(self.pos_state.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.subplot(1, 3, 2)
plt.title('neg_probabilities')
plt.imshow(self.neg_state.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.subplot(1, 3, 3)
plt.title('neg_samples')
plt.imshow(self.neg_sample.asarray(), cmap = plt.cm.gray, interpolation = 'nearest', vmax=1, vmin=0)
plt.suptitle(self.name)
plt.draw()
"""
#visualize.display_w(self.neg_sample.asarray(), 1, 1, self.state.shape[1], self.fig, title=self.name)
#def display_w(w, s, r, c, fig, vmax=None, vmin=None, dataset='mnist', title='weights'):
def ComputeDeriv(self):
pass
def GetLoss(self, get_deriv=False):
pass
def Sample(self):
pass
def ApplyActivation(self):
pass
def GetSparsityDivisor(self):
self.means_temp2.assign(1)
return self.means_temp2
| bsd-3-clause |
antho2930/bowhead | app/Scripts/close_prediction.py | 2 | 2337 | #!/usr/local/bin/python
import pandas as pd
import redis
import sys
import numpy as np
from sklearn.linear_model import LinearRegression
r = redis.StrictRedis(host='localhost', port=6379, db=0)
book = r.get('laravel:tempbook')
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
TDATA=StringIO(book)
df = pd.read_csv(TDATA)
to_forecast = df.close.values
dates = df.id.values
# mean absolute percentage error
def mape(ypred, ytrue):
""" returns the mean absolute percentage error """
idx = ytrue != 0.0
return 100*np.mean(np.abs(ypred[idx]-ytrue[idx])/ytrue[idx])
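# Worked example (illustrative):
#   mape(np.array([110., 45.]), np.array([100., 50.])) == 10.0
# since both predictions are off by 10% and zeros in ytrue are excluded.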
def organize_data(to_forecast, window, horizon):
"""
Input:
to_forecast, univariate time series organized as numpy array
window, number of items to use in the forecast window
horizon, horizon of the forecast
Output:
X, a matrix where each row contains a forecast window
y, the target values for each row of X
"""
shape = to_forecast.shape[:-1] + (to_forecast.shape[-1] - window + 1, window)
strides = to_forecast.strides + (to_forecast.strides[-1],)
X = np.lib.stride_tricks.as_strided(to_forecast, shape=shape, strides=strides)
y = np.array([X[i+horizon][-1] for i in range(len(X)-horizon)])
return X[:-horizon], y
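# Worked example (illustrative): organize_data(np.array([1, 2, 3, 4, 5, 6]), 3, 1)
# returns X = [[1 2 3], [2 3 4], [3 4 5]] and y = [4, 5, 6], i.e. each row of X
# is a window of 3 past values and y holds the value one step ahead.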
k = 4 # number of previous observations to use
h = 1 # forecast horizon
X,y = organize_data(to_forecast, k, h)
m = 10 # number of samples to take in account
regressor = LinearRegression(normalize=True)
regressor.fit(X[:m], y[:m])
#print regressor.coef_
#print 'The error is:%0.2f%%' % mape(regressor.predict(X[m:]),y[m:])
#print y[m:]
#print regressor.predict(X[m:])
#print str(regressor.predict(X[m:])).strip('[]')
#print ', '.join(map(str, y[m:]))
# print out and pop off the last number for the prediction.
print ','.join(map(str, regressor.predict(X[m:])))
"""
http://glowingpython.blogspot.com/2015/01/forecasting-beer-consumption-with.html
figure(figsize=(8,6))
plot(y, label='True demand', color='#377EB8', linewidth=2)
plot(regressor.predict(X),
'--', color='#EB3737', linewidth=3, label='Prediction')
plot(y[:m], label='Train data', color='#3700B8', linewidth=2)
xticks(arange(len(dates))[1::4],dates[1::4], rotation=45)
legend(loc='upper right')
ylabel('beer consumed (millions of litres)')
show()
"""
| apache-2.0 |
jamesjia94/BIDMach | scripts/runICA.py | 8 | 8769 | '''
A testing suite for ICA. This will run some Python code to build the data, then calls the ICA
testing script that contains BIDMach commands, then comes back to this Python code to plot the data.
This code should be in the BIDMach/scripts folder.
(c) February 2015 by Daniel Seita
'''
import matplotlib.pyplot as plt
import numpy as np
import pylab
import sys
from scipy import signal
from sklearn.decomposition import FastICA,PCA
from subprocess import call
'''
Returns a matrix where each row corresponds to one signal. Each row has been standardized to have
zero mean and unit variance (I think), and they also have additive Gaussian noise. In order to
ensure that we actually see enough variation in a small time stamp, the "first" and "second"
variables (and possibly others) are used to increase/decrease the "density" of the data. For
instance a high "first" value pushes the sine waves close together.
> group is an integer that represents the group selection, useful for running many tests
> time is from numpy and controls the density of the data
> num_samples is the number of total samples to use for each row
'''
def get_source(group, time, num_samples):
S = None
first = max(2, int(num_samples/4000))
second = max(3, int(num_samples/3000))
third = max(2, first/10)
if group == 1:
s1 = np.sin(first * time)
s2 = np.sign(np.sin(second * time))
s3 = signal.sawtooth(first * np.pi * time)
S = np.c_[s1, s2, s3]
elif group == 2:
s1 = np.sin(first * time)
s2 = np.sign(np.sin(second * time))
s3 = signal.sawtooth(first * np.pi * time)
s4 = signal.sweep_poly(third * time, [1,2])
S = np.c_[s1, s2, s3, s4]
elif group == 3:
s1 = np.cos(second * time) # Signal 1: cosineusoidal signal
s2 = np.sign(np.sin(second * time)) # Signal 2: square signal
s3 = signal.sawtooth(first * np.pi * time) # Signal 3: saw tooth signal
s4 = signal.sweep_poly(third * time, [1,2]) # Signal 4: sweeping polynomial signal
s5 = np.sin(first * time) # Signal 5: sinusoidal signal
S = np.c_[s1, s2, s3, s4, s5]
elif group == 4:
s1 = np.sin(first * time)
s2 = signal.sawtooth(float(first/2.55) * np.pi * time)
s3 = np.sign(np.sin(second * time))
s4 = signal.sawtooth(first * np.pi * time)
s5 = signal.sweep_poly(third * time, [1,2])
S = np.c_[s1, s2, s3, s4, s5]
S += 0.2 * np.random.normal(size=S.shape)
S /= S.std(axis=0)
return S.T
'''
Generates mixed data. Note that if whitened = True, this picks a pre-whitened matrix to analyze...
Takes in the group number and returns a mixing matrix of the appropriate size. If the data needs to
be pre-whitened, then we should pick an orthogonal mixing matrix. There are three orthogonal
matrices and three non-orthogonal matrices.
'''
def get_mixing_matrix(group, pre_whitened):
A = None
if group == 1:
if pre_whitened:
A = np.array([[ 0, -.8, -.6],
[.8, -.36, .48],
[.6, .48, -.64]])
else:
A = np.array([[ 1, 1, 1],
[0.5, 2, 1],
[1.5, 1, 2]])
elif group == 2:
if pre_whitened:
A = np.array([[-0.040037, 0.24263, -0.015820, 0.96916],
[ -0.54019, 0.29635, 0.78318, -0.083724],
[ 0.84003, 0.23492, 0.48878, -0.016133],
[ 0.030827, -0.89337, 0.38403, 0.23120]])
else:
A = np.array([[ 1, 2, -1, 2.5],
[-.1, -.1, 3, -.9],
[8, 1, 7, 1],
[1.5, -2, 3, -1]])
elif group == 3 or group == 4:
if pre_whitened:
A = np.array([[ 0.31571, 0.45390, -0.59557, 0.12972, 0.56837],
[-0.32657, 0.47508, 0.43818, -0.56815, 0.39129],
[ 0.82671, 0.11176, 0.54879, 0.05170, 0.01480],
[-0.12123, -0.56812, 0.25204, 0.28505, 0.71969],
[-0.30915, 0.48299, 0.29782, 0.75955, -0.07568]])
else:
A = np.array([[ 1, 2, -1, 2.5, 1],
[-.1, -.1, 3, -.9, 2],
[8, 1, 7, 1, 3],
[1.5, -2, 3, -1, 4],
[-.1, 4, -.1, 3, -.2]])
return A
'''
Takes in the predicted source from BIDMach and the original source and attempts to change the order
and sign of the predicted sources to match the original ones. This is purely for debugging. newS
is the list of lists that forms the numpy array, and rows_B_taken ensures a 1-1 correspondence.
> B is the predicted source from BIDMach
> S is the actual source before mixing
'''
def rearrange_data(B, S):
newS = []
rows_B_taken = []
for i in range(S.shape[0]):
new_row = S[i,:]
change_sign = False
best_norm = 99999999
best_row_index = -1
for j in range(B.shape[0]):
if j in rows_B_taken:
continue
old_row = B[j,:]
norm1 = np.linalg.norm(old_row + new_row)
if norm1 < best_norm:
best_norm = norm1
best_row_index = j
change_sign = True
norm2 = np.linalg.norm(old_row - new_row)
if norm2 < best_norm:
best_norm = norm2
best_row_index = j
rows_B_taken.append(best_row_index)
if change_sign:
newS.append((-B[best_row_index,:]).tolist())
else:
newS.append(B[best_row_index,:].tolist())
return np.array(newS)
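# Worked example (illustrative): ICA recovers sources only up to permutation and
# sign, so if S has rows (s1, s2) and BIDMach returns rows (-s2, s1), the greedy
# match above reorders them to (s1, s2), flipping the sign of the second row.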
########
# MAIN #
########
# Some administrative stuff to make it clear how to handle this code.
if len(sys.argv) != 5:
print "\nUsage: python runICA.py <num_samples> <data_group> <pre_zero_mean> <pre_whitened>"
print "<num_samples> should be an integer; recommended to be at least 10000"
print "<data_group> should be an integer; currently only {1,2,3,4} are supported"
print "<pre_zero_mean> should be \'Y\' or \'y\' if you want the (mixed) data to have zero-mean"
print "<pre_whitened> should be \'Y\' or \'y\' if you want the (mixed) data to be pre-whitened"
print "You also need to call this code in the directory where you can call \'./bidmach scripts/ica_test.ssc\'\n"
sys.exit()
n_samples = int(sys.argv[1])
data_group = int(sys.argv[2])
pre_zero_mean = True if sys.argv[3].lower() == "y" else False
pre_whitened = True if sys.argv[4].lower() == "y" else False
if data_group < 1 or data_group > 4:
raise Exception("Data group = " + str(data_group) + " is out of range.")
plot_extra_info = False # If true, plot the mixed input data (X) in addition to the real/predicted sources
# With parameters in pace, generate source, mixing, and output matrices, and save them to files.
np.random.seed(0)
time = np.linspace(0, 8, n_samples) # These need to depend on num of samples
S = get_source(data_group, time, n_samples)
A = get_mixing_matrix(data_group, pre_whitened)
X = np.dot(A,S)
print "\nMean for the mixed data:"
for i in range(X.shape[0]):
print "Row {}: {}".format(i+1, np.mean(X[i,:]))
print "\nThe covariance matrix for the mixed data is\n{}.".format(np.cov(X))
np.savetxt("ica_source.txt", S, delimiter=" ")
np.savetxt("ica_mixing.txt", A, delimiter=" ")
np.savetxt("ica_output.txt", X, delimiter=" ")
print "\nNow calling ICA in BIDMach...\n"
# Call BIDMach. Note that this will exit automatically with sys.exit, without user intervention.
call(["./bidmach", "scripts/ica_test.ssc"])
print "\nFinished with BIDMach. Now let us plot the data."
# Done with BIDMach. First, for the sake of readability, get distributions in same order.
B = pylab.loadtxt('ica_pred_source.txt')
newB = rearrange_data(B, S)
# Extract data and plot results. Add more colors if needed but 5 is plenty.
plt.figure()
if plot_extra_info:
models = [X.T, S.T, newB.T]
names = ['Input to ICA','True Sources Before Mixing','BIDMach\'s FastICA']
else:
models = [S.T, newB.T]
names = ['True Sources Before Mixing','BIDMach\'s FastICA']
colors = ['darkcyan', 'red', 'blue', 'orange', 'yellow']
plot_xlim = min(n_samples-1, 10000)
for ii, (model, name) in enumerate(zip(models, names), 1):
if plot_extra_info:
plt.subplot(3, 1, ii)
else:
plt.subplot(2, 1, ii)
plt.title(name)
plt.xlim([0,plot_xlim])
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
hainm/scikit-xray-examples | demos/dpc/dpc_demo.py | 1 | 6669 | #!/usr/bin/env python
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This is an example script utilizing dpc.py for Differential Phase Contrast
(DPC) imaging based on Fourier shift fitting.
This script requires a SOFC folder containing the test data in your home
directory. The default path for the results (texts and JPEGs) is also your home
directory. It will automatically download the data to your home directory if
you installed wget and unzip utilities. You can also manually download and
decompress the data at https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip
Steps
-----
In this file:
1. Set parameters
2. Load the reference image
3. Save intermediate and final results
in skxray.dpc.dpc_runner:
1. Dimension reduction along x and y direction
2. 1-D IFFT
3. Same calculation on each diffraction pattern
3.1. Read a diffraction pattern
3.2. Dimension reduction along x and y direction
3.3. 1-D IFFT
3.4. Nonlinear fitting
4. Reconstruct the final phase image
"""
import os
from subprocess import call
import scipy
import numpy as np
import matplotlib.pyplot as plt
from pims import ImageSequence
import zipfile
import requests
from clint.textui import progress
import tempfile
from skxray.core import dpc
# dump log messages to screen
dpc.logger.setLevel(dpc.logging.DEBUG)
handler = dpc.logging.StreamHandler()
handler.setLevel(dpc.logging.DEBUG)
dpc.logger.addHandler(handler)
def load_image(filename):
"""
Load an image
Parameters
----------
filename : string
the location and name of an image
Return
----------
t : 2-D numpy array
store the image data
"""
if os.path.exists(filename):
t = plt.imread(filename)
else:
print('Please download and decompress the test data to your home directory\n\
Google drive link, https://drive.google.com/file/d/0B3v6W1bQwN_AVjdYdERHUDBsMmM/edit?usp=sharing\n\
Dropbox link, https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip')
raise Exception('File not found: %s' % filename)
return t
def download_zip(url, path):
r = requests.get(url, stream=True)
temp = tempfile.NamedTemporaryFile(suffix='.zip')
print('Downloading url --> %s\nto --> %s' % (url, temp.name))
with open(temp.name, 'wb') as f:
total_length = int(r.headers.get('content-length'))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
if chunk:
f.write(chunk)
f.flush()
z = zipfile.ZipFile(temp)
print("extracting to --> %s" % path)
z.extractall(path=path)
def run():
# download to this folder
current_folder = os.sep.join(__file__.split(os.sep)[:-1])
dpc_demo_data_path = os.path.join(current_folder, 'SOFC')
if not os.path.exists(dpc_demo_data_path):
zip_file_url = 'https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip?dl=1'
import download
download.run()
# download_zip(zip_file_url, current_folder)
# 1. Set parameters
start_point = [1, 0]
first_image = 1
pixel_size = (55, 55)
focus_to_det = 1.46e6
scan_xstep = 0.1
scan_ystep = 0.1
scan_rows = 121
scan_cols = 121
energy = 19.5
roi = None
padding = 0
weighting = 1.
bad_pixels = None
solver = 'Nelder-Mead'
images = ImageSequence(dpc_demo_data_path + "/*.tif")
img_size = images[0].shape
ref_image = np.ones(img_size)
scale = True
negate = True
print('running dpc')
# 2. Use dpc.dpc_runner
phase, amplitude = dpc.dpc_runner(
ref_image, images, start_point, pixel_size, focus_to_det, scan_rows,
scan_cols, scan_xstep, scan_ystep, energy, padding, weighting, solver,
roi, bad_pixels, negate, scale)
# 3. Save intermediate and final results
print('saving dpc output to disk in --> %s' % current_folder)
scipy.misc.imsave(os.path.join(current_folder, 'phase.jpg'), phase)
np.savetxt(os.path.join(current_folder, 'phase.txt'), phase)
scipy.misc.imsave(os.path.join(current_folder, 'amplitude.jpg'), amplitude)
np.savetxt(os.path.join(current_folder, 'amplitude.txt'), amplitude)
if __name__ == '__main__':
run()
| bsd-3-clause |
samzhang111/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py | 254 | 2005 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
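# One possible completion of the tasks above (a minimal sketch; other
# vectorizer/classifier settings work as well): character n-grams of length
# 1 to 3 fed into a Perceptron.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)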
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
devs1991/test_edx_docmode | venv/share/doc/networkx-1.7/examples/drawing/lanl_routes.py | 10 | 2009 | #!/usr/bin/env python
"""
Routes to LANL from 186 sites on the Internet.
This uses Graphviz for layout so you need PyGraphviz or Pydot.
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2008
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
def lanl_graph():
""" Return the lanl internet view graph from lanl.edges
"""
import networkx as nx
try:
fh=open('lanl_routes.edgelist','r')
except IOError:
print "lanl.edges not found"
raise
G=nx.Graph()
time={}
time[0]=0 # assign 0 to center node
for line in fh.readlines():
(head,tail,rtt)=line.split()
G.add_edge(int(head),int(tail))
time[int(head)]=float(rtt)
# get largest component and assign ping times to G0time dictionary
G0=nx.connected_component_subgraphs(G)[0]
G0.rtt={}
for n in G0:
G0.rtt[n]=time[n]
return G0
if __name__ == '__main__':
import networkx as nx
import math
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
G=lanl_graph()
print "graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G))
print nx.number_connected_components(G),"connected components"
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# use graphviz to find radial layout
pos=nx.graphviz_layout(G,prog="twopi",root=0)
# draw nodes, coloring by rtt ping time
nx.draw(G,pos,
node_color=[G.rtt[v] for v in G],
with_labels=False,
alpha=0.5,
node_size=15)
# adjust the plot limits
xmax=1.02*max(xx for xx,yy in pos.values())
ymax=1.02*max(yy for xx,yy in pos.values())
plt.xlim(0,xmax)
plt.ylim(0,ymax)
plt.savefig("lanl_routes.png")
| agpl-3.0 |
vybstat/scikit-learn | sklearn/cluster/dbscan_.py | 92 | 12380 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <[email protected]>
# Joel Nothman <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None,
random_state=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if random_state is not None:
warnings.warn("The parameter random_state is deprecated in 0.16 "
"and will be removed in version 0.18. "
"DBSCAN is deterministic except for rare border cases.",
category=DeprecationWarning)
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i. While true, it's useless information.)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
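# Illustrative example (not part of the public docstring):
#   X = np.array([[1., 2.], [2., 2.], [2., 3.],
#                 [8., 7.], [8., 8.], [25., 80.]])
#   core_samples, labels = dbscan(X, eps=3, min_samples=2)
#   # labels -> [0, 0, 0, 1, 1, -1]; the isolated last point is noise.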
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
random_state: numpy.RandomState, optional
Deprecated and ignored as of version 0.16, will be removed in version
0.18. DBSCAN does not use random initialization.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None, random_state=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
self.random_state = random_state
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
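# Illustrative usage sketch of the estimator API (assumed data `X`):
#   db = DBSCAN(eps=3, min_samples=2).fit(X)
#   db.labels_                 # one cluster id per sample, -1 marks noise
#   db.core_sample_indices_    # indices of the core samples found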
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
stylianos-kampakis/scikit-learn | sklearn/preprocessing/__init__.py | 268 | 1319 | """
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
__all__ = [
'Binarizer',
'FunctionTransformer',
'Imputer',
'KernelCenterer',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'PolynomialFeatures',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
'label_binarize',
]
| bsd-3-clause |
jungla/ICOM-fluidity-toolbox | 2D/RST/plot_Tr_mom_z.py | 1 | 5941 | import numpy as np
import csv
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from scipy.integrate import simps, trapz
def tracer_m0_z(Zlist,Tr,deltaz):
H = 3 #float(max(Zlist)-min(Zlist))
Zlist = Zlist - deltaz # center of the distribution
# strip nans
S00 = simps(Tr,Zlist)
# S00 = S00/float(N); S01 = S01/float(N); S02 = S02/float(N)
return S00
def tracer_m1_z(Zlist,Tr,deltaz):
H = 3 #float(max(Zlist)-min(Zlist))
Zlist = Zlist - deltaz # center of the distribution
# strip nans
S01 = np.trapz(Tr*(Zlist), Zlist)/H
return S01
def tracer_m2_z(Zlist,Tr,deltaz):
H = 3 #float(max(Zlist)-min(Zlist))
Zlist = Zlist - deltaz # center of the distribution
# strip nans
S00 = np.trapz(Tr, Zlist)/H
S01 = np.trapz(Tr*(Zlist), Zlist)/H
S02 = np.trapz(Tr*(Zlist)**2, Zlist)/H
# S00 = S00/float(N); S01 = S01/float(N); S02 = S02/float(N)
return (S02-S01**2)/S00
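# Note (illustrative): writing the H-normalised integrals as S00 = (1/H)*int(T dz),
# S01 = (1/H)*int(T z dz) and S02 = (1/H)*int(T z^2 dz), the value returned above,
# (S02 - S01**2)/S00, equals the centred second moment S02 - S01**2 whenever the
# tracer profile is normalised so that S00 = 1.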
label_BW = 'm_25_2b_tracer'
label_B = 'm_25_2b_tracer'
label = 'm_25_1b_tracer'
basename_BW = 'Tracer_CG_1_'+label_BW
basename_B = 'Tracer_CG_1_'+label_B
dayi = 0
dayf = 90
days = 1
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
Zlist = np.cumsum(dl) + 1
time = range(dayi,dayf,days)
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = './Tracer_CG/z/'
# read csv
Tracer_B = []
Tracer_BW = []
for t in time:
# print 'read drate', t
with open('./Tracer_CG/z/'+basename_B+'_'+str(t)+'_z_clip.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
Tracer_B.append(row[:])
for t in time:
with open('./Tracer_CG/z/'+basename_BW+'_'+str(t)+'_z_clip.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile)
for row in spamreader:
Tracer_BW.append(row[:])
Tracer_B = np.asarray(Tracer_B).astype(float)
Tracer_BW = np.asarray(Tracer_BW).astype(float)
plt.contourf(np.asarray(time)*1440/3600.,-1*Zlist,np.transpose(Tracer_B),60)
plt.xlabel('Time [hr]',fontsize=18)
plt.ylabel('Depth [m]',fontsize=18)
plt.colorbar()
plt.title('Tracer mean concentration $B25_m$')
plt.savefig('./plot/'+label+'/Tracer_'+label_B+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tracer_'+label_B+'.eps'
plt.close()
plt.contourf(np.asarray(time)*1440/3600.,-1*Zlist,np.transpose(Tracer_BW),60)
plt.xlabel('Time [hr]',fontsize=18)
plt.ylabel('Depth [m]',fontsize=18)
plt.colorbar()
plt.title('Tracer mean concentration $BW25_m$')
plt.savefig('./plot/'+label+'/Tracer_'+label_BW+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tracer_'+label_BW+'.eps'
plt.close()
# plot trace concentration at various times
pB, = plt.plot(Zlist,Tracer_B[0,:],'k',linewidth=2)
plt.plot(Zlist,Tracer_B[5,:],'k',linewidth=2)
plt.plot(Zlist,Tracer_B[10,:],'k',linewidth=2)
plt.plot(Zlist,Tracer_B[15,:],'k',linewidth=2)
pBW, = plt.plot(Zlist,Tracer_BW[0,:],'k--',linewidth=2)
plt.plot(Zlist,Tracer_BW[5,:],'k--',linewidth=2)
plt.plot(Zlist,Tracer_BW[10,:],'k--',linewidth=2)
plt.plot(Zlist,Tracer_BW[15,:],'k--',linewidth=2)
plt.legend((pB,pBW),('$B25_m$','$BW25_m$'),loc=4)
plt.xlim(0,25)
plt.savefig('./plot/'+label+'/Tracer_time.eps',bbox_inches='tight')
print './plot/'+label+'/Tracer_time.eps'
plt.close()
deltaz = 0
#0th mom
Tr_disp_B = []
Tr_disp_BW = []
for t in time:
Tr_disp_B.append(tracer_m0_z(Zlist,Tracer_B[t,:],deltaz))
Tr_disp_BW.append(tracer_m0_z(Zlist,Tracer_BW[t,:],deltaz))
pB, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_B,'k',linewidth=2)
pBW, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_BW,'k--',linewidth=2)
plt.legend((pB,pBW),('$B25_m$','$BW25_m$'),loc=4)
plt.xlabel('Time [hr]',fontsize=18)
plt.ylabel('Tracer 0th moment',fontsize=18)
#plt.xticks(np.linspace(timeD[0]/3600.,timeD[-1]/3600.,7),np.linspace(48,24*3+48,7).astype(int))
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/Tr_0mom_'+label+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tr_0mom_'+label+'.eps'
plt.close()
#1st mom
Tr_disp_B = []
Tr_disp_BW = []
for t in time:
Tr_disp_B.append(tracer_m1_z(Zlist,Tracer_B[t,:],deltaz))
Tr_disp_BW.append(tracer_m1_z(Zlist,Tracer_BW[t,:],deltaz))
pB, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_B,'k',linewidth=2)
pBW, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_BW,'k--',linewidth=2)
plt.legend((pB,pBW),('$B25_m$','$BW25_m$'),loc=4)
plt.xlabel('Time [hr]',fontsize=18)
plt.ylabel('Tracer 1st moment',fontsize=18)
#plt.xticks(np.linspace(timeD[0]/3600.,timeD[-1]/3600.,7),np.linspace(48,24*3+48,7).astype(int))
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/Tr_1mom_'+label+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tr_1mom_'+label+'.eps'
plt.close()
#2nd mom
Tr_disp_B = []
Tr_disp_BW = []
for t in time:
Tr_disp_B.append(tracer_m2_z(Zlist,Tracer_B[t,:],deltaz))
Tr_disp_BW.append(tracer_m2_z(Zlist,Tracer_BW[t,:],deltaz))
pB, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_B,'k',linewidth=2)
pBW, = plt.plot(np.asarray(time)*1440/3600.,Tr_disp_BW,'k--',linewidth=2)
plt.legend((pB,pBW),('$B25_m$','$BW25_m$'),loc=4)
plt.xlabel('Time [hr]',fontsize=18)
plt.ylabel('Tracer 2nd moment',fontsize=18)
#plt.xticks(np.linspace(timeD[0]/3600.,timeD[-1]/3600.,7),np.linspace(48,24*3+48,7).astype(int))
#plt.xticks(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7),np.round(np.linspace(np.min(w[w>0]),np.max(w[w>0]),7)*360000)/100,fontsize=16)
#plt.yticks(fontsize=16)
plt.savefig('./plot/'+label+'/Tr_2mom_'+label+'.eps',bbox_inches='tight')
print './plot/'+label+'/Tr_2mom_'+label+'.eps'
plt.close()
| gpl-2.0 |
adamgreenhall/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
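# A quick worked check of the "5 times bigger" claim above (a sketch, not part
# of the original example): scikit-learn's digits data has shape (1797, 64), so
# nudging stacks the original plus four 1-px shifted copies:
#
#     X_big, Y_big = nudge_dataset(digits.data, digits.target)
#     assert X_big.shape[0] == 5 * digits.data.shape[0]   # 5 * 1797 = 8985
#
# (``digits`` is the dataset loaded just below.)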
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
DuCorey/bokeh | bokeh/models/sources.py | 1 | 23011 | from __future__ import absolute_import
import warnings
from ..core.has_props import abstract
from ..core.properties import Any, Bool, ColumnData, Dict, Enum, Instance, Int, JSON, List, Seq, String
from ..model import Model
from ..util.dependencies import import_optional
from ..util.warnings import BokehUserWarning
from .callbacks import Callback
from .filters import Filter
pd = import_optional('pandas')
@abstract
class DataSource(Model):
''' A base class for data source types.
'''
selected = Dict(String, Dict(String, Any), default={
'0d': {'glyph': None, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': {}}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
.. code-block:: python
# selection information for line and patch glyphs
'0d' : {
# the glyph that was selected
'glyph': None
# array with the [smallest] index of the segment of the line that was hit
'indices': []
}
# selection for most (point-like) glyphs, except lines and patches
'1d': {
# indices of the points included in the selection
indices: []
}
# selection information for multiline and patches glyphs
'2d': {
# mapping of indices of the multiglyph to array of glyph indices that were hit
# e.g. {3: [5, 6], 4: [5]}
indices: {}
}
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
@abstract
class ColumnarDataSource(DataSource):
''' A base class for data source types, which can be mapped onto
a columnar format.
'''
column_names = List(String, help="""
A list of names for all the columns in this DataSource.
""")
class ColumnDataSource(ColumnarDataSource):
''' Maps names of columns to sequences or arrays.
The ``ColumnDataSource`` is a fundamental data structure of Bokeh. Most
plots, data tables, etc. will be driven by a ``ColumnDataSource``.
If the ColumnDataSource initializer is called with a single argument that
can be any of the following:
* A Python ``dict`` that maps string names to sequences of values, e.g.
lists, arrays, etc.
.. code-block:: python
data = {'x': [1,2,3,4], 'y': np.ndarray([10.0, 20.0, 30.0, 40.0])}
source = ColumnDataSource(data)
* A Pandas ``DataFrame`` object
.. code-block:: python
source = ColumnDataSource(df)
In this case the CDS will have columns corresponding to the columns of
the ``DataFrame``. If the ``DataFrame`` has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
* A Pandas ``GroupBy`` object
.. code-block:: python
group = df.groupby(('colA', 'ColB'))
In this case the CDS will have columns corresponding to the result of
calling ``group.describe()``. The ``describe`` method generates columns
for statistical measures such as ``mean`` and ``count`` for all the
non-grouped original columns. The CDS columns are formed by joining
original column names with the computed measure. For example, if a
``DataFrame`` has columns ``'year'`` and ``'mpg'``. Then passing
``df.groupby('year')`` to a CDS will result in columns such as
``'mpg_mean'``
If the ``GroupBy.describe`` result has a named index column, then
CDS will also have a column with this name. However, if the index name
(or any subname of a ``MultiIndex``) is ``None``, then the CDS will have
a column generically named ``index`` for the index.
Note this capability to adapt ``GroupBy`` objects may only work with
Pandas ``>=0.20.0``.
.. note::
There is an implicit assumption that all the columns in a given
``ColumnDataSource`` all have the same length at all times. For this
reason, it is usually preferable to update the ``.data`` property
of a data source "all at once".
'''
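# Illustrative sketch of the ``GroupBy`` behaviour described in the docstring
# above (not part of the original module; the exact set of generated columns
# depends on what ``DataFrame.describe`` produces for this Pandas version):
#
#     df = pd.DataFrame(dict(year=[2000, 2000, 2001], mpg=[22., 24., 28.]))
#     source = ColumnDataSource(df.groupby('year'))
#     # source.data keys now include 'mpg_count', 'mpg_mean', ... and 'year'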
data = ColumnData(String, Seq(Any), help="""
Mapping of column names to sequences of data. The data can be, e.g.,
Python lists or tuples, NumPy arrays, etc.
""").asserts(lambda _, data: len(set(len(x) for x in data.values())) <= 1,
lambda obj, name, data: warnings.warn(
"ColumnDataSource's columns must be of the same length. " +
"Current lengths: %s" % ", ".join(sorted(str((k, len(v))) for k, v in data.items())), BokehUserWarning))
def __init__(self, *args, **kw):
''' If called with a single argument that is a dict or
pandas.DataFrame, treat that implicitly as the "data" attribute.
'''
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
if pd and isinstance(raw_data, pd.DataFrame):
raw_data = self._data_from_df(raw_data)
elif pd and isinstance(raw_data, pd.core.groupby.GroupBy):
raw_data = self._data_from_groupby(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
super(ColumnDataSource, self).__init__(**kw)
self.column_names[:] = list(raw_data.keys())
self.data.update(raw_data)
@staticmethod
def _data_from_df(df):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
df (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
_df = df.copy()
index = _df.index
tmp_data = {c: v.values for c, v in _df.iteritems()}
new_data = {}
for k, v in tmp_data.items():
if isinstance(k, tuple):
k = "_".join(k)
new_data[k] = v
if index.name:
new_data[index.name] = index.values
elif index.names:
try:
new_data["_".join(index.names)] = index.values
except TypeError:
new_data["index"] = index.values
else:
new_data["index"] = index.values
return new_data
@staticmethod
def _data_from_groupby(group):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
group (GroupBy) : data to convert
Returns:
dict[str, np.array]
'''
return ColumnDataSource._data_from_df(group.describe())
@classmethod
def from_df(cls, data):
''' Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data)
@classmethod
def from_groupby(cls, data):
''' Create a ``dict`` of columns from a Pandas GroupBy,
suitable for creating a ColumnDataSource.
The data generated is the result of running ``describe``
on the group.
Args:
data (Groupby) : data to convert
Returns:
dict[str, np.array]
'''
return cls._data_from_df(data.describe())
def to_df(self):
''' Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
'''
if not pd:
raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
''' Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
'''
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def remove(self, name):
''' Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
'''
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def stream(self, new_data, rollover=None):
''' Efficiently update data source columns with new append-only data.
In cases where it is necessary to append new data to existing columns, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq]) : a mapping of column names to sequences of
new data to append to each column.
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
# calls internal implementation
self._stream(new_data, rollover)
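# Illustrative sketch of the ``rollover`` behaviour described above (not part
# of the original module): if ``source`` already holds 100 rows per column,
#
#     source.stream({'foo': [1, 2], 'bar': [10, 20]}, rollover=100)
#
# appends the two new rows and silently drops the two oldest, so every column
# stays at 100 entries; with ``rollover=None`` the columns grow without bound.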
def _stream(self, new_data, rollover=None, setter=None):
''' Internal implementation to efficiently update data source columns
with new append-only data. The internal implementation adds the setter
attribute. [https://github.com/bokeh/bokeh/issues/6577]
In cases where it is necessary to append new data to existing columns, this method
can efficiently send only the new data, instead of requiring the
entire data set to be re-sent.
Args:
new_data (dict[str, seq] or DataFrame or Series) : a mapping of
column names to sequences of new data to append to each column,
a pandas DataFrame, or a pandas Series in case of a single row -
in this case the Series index is used as column names
All columns of the data source must be present in ``new_data``,
with identical-length append data.
rollover (int, optional) : A maximum column size, above which data
from the start of the column begins to be discarded. If None,
then columns will continue to grow unbounded (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
Returns:
None
Raises:
ValueError
Example:
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[], bar=[]))
# has new, identical-length updates for all columns in source
new_data = {
'foo' : [10, 20],
'bar' : [100, 200],
}
source.stream(new_data)
'''
if pd and isinstance(new_data, pd.Series):
new_data = new_data.to_frame().T
if pd and isinstance(new_data, pd.DataFrame):
newkeys = set(new_data.columns)
else:
newkeys = set(new_data.keys())
oldkeys = set(self.data.keys())
if newkeys != oldkeys:
missing = oldkeys - newkeys
extra = newkeys - oldkeys
if missing and extra:
raise ValueError(
"Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra)))
)
elif missing:
raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
else:
raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
if not (pd and isinstance(new_data, pd.DataFrame)):
import numpy as np
lengths = set()
arr_types = (np.ndarray, pd.Series) if pd else np.ndarray
for k, x in new_data.items():
if isinstance(x, arr_types):
if len(x.shape) != 1:
raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
lengths.add(x.shape[0])
else:
lengths.add(len(x))
if len(lengths) > 1:
raise ValueError("All streaming column updates must be the same length")
self.data._stream(self.document, self, new_data, rollover, setter)
def patch(self, patches, setter=None):
''' Efficiently update data source columns at specific locations
If it is only necessary to update a small subset of data in a
ColumnDataSource, this method can be used to efficiently update only
the subset, instead of requiring the entire data set to be sent.
This method should be passed a dictionary that maps column names to
lists of tuples that describe a patch change to apply. To replace
individual items in columns entirely, the tuples should be of the
form:
.. code-block:: python
(index, new_value) # replace a single column value
# or
(slice, new_values) # replace several column values
Values at an index or slice will be replaced with the corresponding
new values.
In the case of columns whose values are other arrays or lists, (e.g.
image or patches glyphs), it is also possible to patch "subregions".
In this case the first item of the tuple should be a list or array whose first
element is the index of the array item in the CDS patch, and whose
subsequent elements are integer indices or slices into the array item:
.. code-block:: python
# replace the entire 10th column of the 2nd array:
+----------------- index of item in column data source
|
| +--------- row subindex into array item
| |
| | +- column subindex into array item
V V V
([2, slice(None), 10], new_values)
Imagining a list of 2d NumPy arrays, the patch above is roughly
equivalent to:
.. code-block:: python
data = [arr1, arr2, ...] # list of 2d arrays
data[2][:, 10] = new_data
There are some limitations to the kinds of slices and data that can
be accepted.
* Negative ``start``, ``stop``, or ``step`` values for slices will
result in a ``ValueError``.
* In a slice, ``start > stop`` will result in a ``ValueError``
* When patching 1d or 2d subitems, the subitems must be NumPy arrays.
* New values must be supplied as a **flattened one-dimensional array**
of the appropriate size.
Args:
patches (dict[str, list[tuple]]) : lists of patches for each column
Returns:
None
Raises:
ValueError
Example:
The following example shows how to patch entire column elements.
.. code-block:: python
source = ColumnDataSource(data=dict(foo=[10, 20, 30], bar=[100, 200, 300]))
patches = {
'foo' : [ (slice(2), [11, 12]) ],
'bar' : [ (0, 101), (2, 301) ],
}
source.patch(patches)
After this operation, the value of the ``source.data`` will be:
.. code-block:: python
dict(foo=[11, 22, 30], bar=[101, 200, 301])
For a more comprehensive complete example, see :bokeh-tree:`examples/howto/patch_app.py`.
'''
import numpy as np
extra = set(patches.keys()) - set(self.data.keys())
if extra:
raise ValueError("Can only patch existing columns (extra: %s)" % ", ".join(sorted(extra)))
for name, patch in patches.items():
col_len = len(self.data[name])
for ind, value in patch:
# integer index, patch single value of 1d column
if isinstance(ind, int):
if ind > col_len or ind < 0:
raise ValueError("Out-of bounds index (%d) in patch for column: %s" % (ind, name))
# slice index, patch multiple values of 1d column
elif isinstance(ind, slice):
_check_slice(ind)
if ind.stop is not None and ind.stop > col_len:
raise ValueError("Out-of bounds slice index stop (%d) in patch for column: %s" % (ind.stop, name))
# multi-index, patch sub-regions of "n-d" column
elif isinstance(ind, (list, tuple)):
if len(ind) == 0:
raise ValueError("Empty (length zero) patch multi-index")
if len(ind) == 1:
raise ValueError("Patch multi-index must contain more than one subindex")
if not isinstance(ind[0], int):
raise ValueError("Initial patch sub-index may only be integer, got: %s" % ind[0])
if ind[0] > col_len or ind[0] < 0:
raise ValueError("Out-of bounds initial sub-index (%d) in patch for column: %s" % (ind, name))
if not isinstance(self.data[name][ind[0]], np.ndarray):
raise ValueError("Can only sub-patch into columns with NumPy array items")
if len(self.data[name][ind[0]].shape) != (len(ind)-1):
raise ValueError("Shape mismatch between patch slice and sliced data")
elif isinstance(ind[0], slice):
_check_slice(ind[0])
if ind[0].stop is not None and ind[0].stop > col_len:
raise ValueError("Out-of bounds initial slice sub-index stop (%d) in patch for column: %s" % (ind.stop, name))
# Note: bounds of sub-indices after the first are not checked!
for subind in ind[1:]:
if not isinstance(subind, (int, slice)):
raise ValueError("Invalid patch sub-index: %s" % subind)
if isinstance(subind, slice):
_check_slice(subind)
else:
raise ValueError("Invalid patch index: %s" % ind)
self.data._patch(self.document, self, patches, setter)
def _check_slice(s):
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError("Patch slices must have start < end, got %s" % s)
if (s.start is not None and s.start < 1) or \
(s.stop is not None and s.stop < 1) or \
(s.step is not None and s.step < 1):
raise ValueError("Patch slices must have positive (start, stop, step) values, got %s" % s)
class CDSView(Model):
''' A view into a ColumnDataSource that represents a row-wise subset.
'''
filters = List(Instance(Filter), default=[], help="""
List of filters that the view comprises.
""")
source = Instance(ColumnarDataSource, help="""
The ColumnDataSource associated with this view. Used to determine
the length of the columns.
""")
class GeoJSONDataSource(ColumnarDataSource):
'''
'''
geojson = JSON(help="""
GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
only process a FeatureCollection or GeometryCollection.
""")
@abstract
class RemoteSource(ColumnDataSource):
'''
'''
data_url = String(help="""
The URL to the endpoint for the data.
""")
polling_interval = Int(help="""
Polling interval for updating the data source, in milliseconds.
""")
class AjaxDataSource(RemoteSource):
'''
'''
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
Maximum size of the data array to keep after each pull request.
Beyond that size, the oldest data is discarded (the array is right-shifted).
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
HTTP headers to set for the Ajax request.
""")
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/linear_model/tests/test_ransac.py | 40 | 12814 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros((100, 1)))
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
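# The hand-calculated values follow the standard RANSAC trial bound (shown
# here as a sketch, with p the stop probability and w = n_inliers / n_samples):
#
#     N = ceil( log(1 - p) / log(1 - w**min_samples) )
#
# e.g. w = 0.95, min_samples = 2, p = 0.99:
#     N = ceil(log(0.01) / log(1 - 0.95**2)) = ceil(1.98) = 2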
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
if __name__ == "__main__":
np.testing.run_module_suite()
| bsd-3-clause |
mohseniaref/PySAR-1 | pysar/insar_vs_gps.py | 1 | 19558 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
import numpy as np
import getopt
import sys
import os
import h5py
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
def readGPSfile(gpsFile,gps_source):
if gps_source in ['cmm4','CMM4']:
gpsData = np.loadtxt(gpsFile,usecols = (1,2,3,4,5,6,7,8,9,10))
Stations = np.loadtxt(gpsFile,dtype=str,usecols = (0,1))[:,0]
St=[]; Lon=[];Lat=[];Ve=[];Vn=[];Se=[];Sn=[]
for i in range(gpsData.shape[0]):
if 'GPS' in Stations[i]:
Lat.append(gpsData[i,0])
Lon.append(gpsData[i,1])
Ve.append(gpsData[i,2])
Se.append(gpsData[i,3])
Vn.append(gpsData[i,4])
Sn.append(gpsData[i,5])
St.append(Stations[i])
Up=np.zeros(St.shape);Sup=np.zeros(St.shape)
elif gps_source == 'pysar':
# gpsData = np.loadtxt(gpsFile,usecols = (1,2,3,4,5,6,7,8,9))
gpsData = np.loadtxt(gpsFile,usecols = (1,2,3,4,5,6))
Stations = np.loadtxt(gpsFile,dtype=str,usecols = (0,1))[:,0]
St=[];Lon=[];Lat=[];Ve=[];Vn=[];Se=[];Sn=[]
for i in range(gpsData.shape[0]):
Lon.append(gpsData[i,0]-360)
Lat.append(gpsData[i,1])
Ve.append(gpsData[i,2])
Vn.append(gpsData[i,3])
Se.append(gpsData[i,4])
Sn.append(gpsData[i,5])
St.append(Stations[i])
Up=np.zeros(St.shape);Sup=np.zeros(St.shape)
elif gps_source in ['usgs','USGS']:
gpsData_Hz = np.loadtxt(gpsFile,usecols = (0,1,2,3,4,5,6))
gpsData_up = np.loadtxt(gpsFile,usecols = (8,9))
gpsData=np.hstack((gpsData_Hz,gpsData_up))
Stations = np.loadtxt(gpsFile,dtype=str,usecols = (7,8))[:,0]
St=[];Lon=[];Lat=[];Ve=[];Vn=[];Se=[];Sn=[];Up=[];Sup=[]
for i in range(gpsData.shape[0]):
Lat.append(gpsData[i,0])
Lon.append(gpsData[i,1])
Vn.append(gpsData[i,2])
Ve.append(gpsData[i,3])
Sn.append(gpsData[i,4])
Se.append(gpsData[i,5])
Up.append(gpsData[i,6])
Sup.append(gpsData[i,7])
St.append(Stations[i])
return list(St),Lat,Lon,Ve,Se,Vn,Sn,Up,Sup
#def redGPSfile(gpsFile):
# gpsData_Hz = np.loadtxt(gpsFile,usecols = (0,1,2,3,4,5,6))
# gpsData_up = np.loadtxt(gpsFile,usecols = (8,9))
# gpsData=np.hstack((gpsData_Hz,gpsData_up))
# Stations = np.loadtxt(gpsFile,dtype=str,usecols = (7,8))[:,0]
# return list(Stations), gpsData
def nearest(x, tbase,xstep):
# """ find nearest neighbour """
dist = np.sqrt((tbase -x)**2)
if min(dist) <= np.abs(xstep):
indx=dist==min(dist)
else:
indx=[]
return indx
def find_row_column(Lon,Lat,lon,lat,lon_step,lat_step):
################################################
# finding row and column numbers of the GPS point
idx= nearest(Lon, lon, lon_step)
idy= nearest(Lat, lat, lat_step)
if idx !=[] and idy != []:
IDX=np.where(idx==True)[0][0]
IDY=np.where(idy==True)[0][0]
else:
IDX=np.nan
IDY=np.nan
return IDY, IDX
################################################
def Usage():
print '''
********************************
********************************
Compares InSAR and GPS velocities. Option -G can be used to specify the mode of comparison.
Output is InSARvsGPS.png
Usage:
insar_vs_gps.py -v InSARvelocity.h5 -g GPS velocity file -r Name of the reference GPS station -l station list to be compared
-l : if station list is not specified, all stations in GPS velocity file are compared with InSAR
-m min value of the x and y axis of the plot
-M max value of the x and y axis of the plot
-r refernce GPS station
-s second velocity map
-S source of the GPS data: (usgs,cmm4,pysar)
see documentation for more information
-I incidence angle (if not given average look angle is used instead)
-H Heading angle (if not given then the program reads it from the attributes of the velocity file)
-G GPS components to be used to compare with InSAR: los_3D , los_Hz , los_Up , gps_Up. [default is los_3D]
los_3D: to project three gps components to LOS
los_Hz: to project horizontal gps components to LOS
los_Up: to project only vertical gps component to LOS
gps_Up: uses vertical GPS to compare with InSAR (InSAR LOS will be projected to Up)
-A annotate the GPS station name on the plot [yes] or no
-C annotation font color [default is green]
-x annotation offset from the point in x direction [default=0]
-y annotation offset from the point in y direction [default=0]
-u to plot 1, 2 or 3 sigma uncertainty [default=1]
-B marker size [default = 15]
Example:
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -S usgs -r BEMT -l 'HNPS,DHLG,SLMS,USGC'
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -S cmm4 -r BEMT -l 'HNPS,DHLG,SLMS,USGC,ROCH,MONP,SIO3,IVCO,TMAP,BMHL,BILL,OGHS'
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -S usgs -r BEMT
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -S usgs -r BEMT -c geo_temporal_coherence.h5 -t 0.95
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -S usgs -r BEMT -c geo_temporal_coherence.h5 -t 0.95 -l 'HNPS,DHLG,SLMS,USGC'
insar_vs_gps.py -v geo_velocity_New_masked.h5 -g usgs_velocities_NAfixed.txt -r BEMT -S usgs -A yes -C green -x 0 -y 0.5 -H 193.0 -I 23.0
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
When user wants to compare two InSAR velocities at GPS stations:
Example: (to compare geo_InSAR_velocity.h5 with simulated_velocity.h5 at GPS stations)
insar_vs_gps.py -v geo_InSAR_velocity.h5 -g gpsVelocity.txt -r BEMT -s simulated_velocity.h5
********************************
********************************
'''
def main(argv):
annotation='yes'
ann_x=0
ann_y=0
annotation_Color='green'
disp_velocity='yes'
GPS_InSAR_dif_thr=1
gps_comp='los_3D'
uncertainty_fac=1.0
MarkerSize=5
try:
opts, args = getopt.getopt(argv,"v:r:g:G:l:c:t:m:M:s:S:A:B:C:x:y:I:H:u:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt == '-v':
velocityFile = arg
elif opt == '-s':
velocityFile2 = arg
elif opt == '-g':
gpsFile = arg
elif opt == '-r':
refStation = arg
elif opt == '-l':
stationsList = arg.split(',')
elif opt == '-c':
coherenceFile = arg
elif opt == '-t':
thr = float(arg)
elif opt == '-m':
minV=float(arg)
elif opt == '-M':
maxV=float(arg)
elif opt == '-S':
gps_source = arg
elif opt == '-A':
annotation = arg
elif opt == '-C':
annotation_Color = arg
elif opt == '-x':
ann_x = float(arg)
elif opt == '-y':
ann_y = float(arg)
elif opt == '-I':
theta = float(arg)
elif opt == '-H':
heading = float(arg)
elif opt == '-G':
gps_comp = arg
elif opt == '-u':
uncertainty_fac = float(arg)
elif opt == '-B':
MarkerSize = float(arg)
try:
velocityFile
gpsFile
refStation
except:
Usage();sys.exit(1)
try:
thr
except:
thr=0.9
h5file = h5py.File(velocityFile,'r')
dset=h5file['velocity'].get('velocity')
insarData=dset[0:dset.shape[0],0:dset.shape[1]]
k=h5file.keys()
try:
h5file2 = h5py.File(velocityFile2,'r')
dset2=h5file2['velocity'].get('velocity')
insarData2=dset2[0:dset2.shape[0],0:dset2.shape[1]]
except:
print ''
ullon=float(h5file[k[0]].attrs['X_FIRST'])
ullat=float(h5file[k[0]].attrs['Y_FIRST'])
lon_step=float(h5file[k[0]].attrs['X_STEP'])
lat_step=float(h5file[k[0]].attrs['Y_STEP'])
lon_unit=h5file[k[0]].attrs['Y_UNIT']
lat_unit=h5file[k[0]].attrs['X_UNIT']
Length,Width = np.shape(insarData)
lllat=ullat+Length*lat_step
urlon=ullon+Width*lon_step
lat=np.arange(ullat,lllat,lat_step)
lon=np.arange(ullon,urlon,lon_step)
#################################################################################################
# finding the raw an column of the reference gps station and referencing insar data to this pixel
Stations,Lat,Lon,Ve,Se,Vn,Sn,Vu,Su=readGPSfile(gpsFile,gps_source)
idxRef=Stations.index(refStation)
# Length,Width=np.shape(insarData)
# lat,lon,lat_step,lon_step = get_lat_lon(h5file,Length,Width)
IDYref,IDXref=find_row_column(Lon[idxRef],Lat[idxRef],lon,lat,lon_step,lat_step)
#############################################
# Stations, gpsData = redGPSfile(gpsFile)
# idxRef=Stations.index(refStation)
# Lat,Lon,Vn,Ve,Sn,Se,Corr,Vu,Su = gpsData[idxRef,:]
# IDYref,IDXref=find_row_column(Lon,Lat,lon,lat,lon_step,lat_step)
###################################################
if (not np.isnan(IDYref)) and (not np.isnan(IDXref)):
print ''
print '-----------------------------------------------------------------------'
print 'referencing InSAR data to the GPS station at : ' + str(IDYref) + ' , '+ str(IDXref)
if not np.isnan(insarData[IDYref][IDXref]):
insarData=insarData - insarData[IDYref][IDXref]
else:
print '''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
WARNING: nan value for InSAR data at the refernce pixel!
reference station should be a pixel with valid value in InSAR data.
please select another GPS station as the reference station.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
sys.exit(1)
else:
print 'WARNING:'
print 'Reference GPS station is out of the area covered by InSAR data'
print 'please select another GPS station as the reference station.'
sys.exit(1)
#######################################################################################
try:
stationsList
except:
stationsList = Stations
# stationsList.remove(refStation)
# theta=23.0*np.pi/180.0
# heading=193.0*np.pi/180.0
try:
print 'incidence angle = ' + str(theta)
except:
print 'using look angle from the velocity file. For more precise results input the incidence angle using option -I.'
look_n=float(h5file['velocity'].attrs['LOOK_REF1'])
look_f=float(h5file['velocity'].attrs['LOOK_REF2'])
theta=(look_n+look_f)/2.
print 'incidence angle = ' + str(theta)
try:
print 'Heading angle = '+str(heading)
except:
heading = float(h5file['velocity'].attrs['HEADING'])
if heading < 0:
heading=heading+360
theta=theta*np.pi/180.0
heading=heading*np.pi/180.0
if gps_comp in ['los_3D','LOS_3D','los_3d']:
unitVec=[np.cos(heading)*np.sin(theta),-np.sin(theta)*np.sin(heading),-np.cos(theta)]
gps_comp_txt=' projecting three gps components to LOS'
elif gps_comp in ['los_Hz','LOS_HZ','los_hz','los_HZ','LOS_hz']:
unitVec=[np.cos(heading)*np.sin(theta),-np.sin(theta)*np.sin(heading),0]
gps_comp_txt=' projecting horizontal gps components to LOS'
elif gps_comp in ['LOS_UP','los_Up','los_up','LOS_up']:
unitVec=[0,0,-np.cos(theta)]
gps_comp_txt=' projecting vertical gps components to LOS'
elif gps_comp in ['gps_up','GPS_UP','GPS_Up','gps_Up']:
unitVec=[0,0,1]
gps_comp_txt=' comparing vertical gps with InSAR'
print '-------------------------'
print 'Projecting InSAR to vertical'
insarData=-insarData/np.cos(theta)
print '-------------------------'
print 'unit vector for :' + gps_comp_txt
print unitVec
print '-------------------------'
gpsLOS_ref=unitVec[0]*Ve[idxRef]+unitVec[1]*Vn[idxRef]+unitVec[2]*Vu[idxRef]
Sr= ((unitVec[0]**2)*Se[idxRef]**2+(unitVec[1]**2)*Sn[idxRef]**2+(unitVec[2]**2)*Su[idxRef]**2)**0.5
# Sr=((Se[idxRef]**2)*(np.sin(theta)*np.cos(heading))**2+(Sn[idxRef]**2)*(np.sin(heading)*np.sin(theta))**2+(Su[idxRef]**2)*(np.cos(theta)**2))**0.5
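# Worked form of the LOS projection used above (a reference sketch, not new
# functionality), for the three-component case with incidence T and heading H:
#
#     V_LOS = Ve*sin(T)*cos(H) - Vn*sin(T)*sin(H) - Vu*cos(T)
#     S_LOS = sqrt((u_e*Se)**2 + (u_n*Sn)**2 + (u_u*Su)**2)
#
# e.g. for T = 23 deg and H = 193 deg (the values in the usage example) the
# unit vector is roughly [-0.38, 0.09, -0.92].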
print '######################################################################'
try:
h5coh = h5py.File(coherenceFile)
kh5coh=h5coh.keys()
dset=h5coh[kh5coh[0]].get(kh5coh[0])
Coh=dset[0:dset.shape[0],0:dset.shape[1]]
except:
print 'No information about the coherence of the points'
InSAR=[]
GPS=[]
InSAR1=[]
GPS1=[]
InSAR2=[]
GPS2=[]
coherence=[]
GPSx=[]
GPSy=[]
GPSx1=[]
GPSy1=[]
GPSx2=[]
GPSy2=[]
GPS_station=[]
GPS_std=[]
for st in stationsList:
try :
idx=Stations.index(st)
# Lat,Lon,Vn,Ve,Sn,Se,Corr,Vu,Su = gpsData[idx,:]
gpsLOS=unitVec[0]*Ve[idx]+unitVec[1]*Vn[idx]+unitVec[2]*Vu[idx]
Sg= ((unitVec[0]**2)*Se[idx]**2+(unitVec[1]**2)*Sn[idx]**2+(unitVec[2]**2)*Su[idx]**2)**0.5
# Sg=((Se[idx]**2)*(np.sin(theta)*np.cos(heading))**2+(Sn[idx]**2)*(np.sin(heading)*np.sin(theta))**2+(Su[idx]**2)*(np.cos(theta)**2))**0.5
S=(Sg**2+Sr**2)**0.5
gpsLOS=gpsLOS-gpsLOS_ref
IDY,IDX=find_row_column(Lon[idx],Lat[idx],lon,lat,lon_step,lat_step)
insar_velocity=-insarData[IDY][IDX]
try:
gpsLOS=insarData2[IDY][IDX]-insarData2[IDYref][IDXref]
gpsLOS=-1000.0*gpsLOS
except:
InSAR_GPS_Copmarison='yes'
if not np.isnan(insarData[IDY][IDX]):
print '%%%%%%%%%%%%%%%%%%%%'
print st
print 'GPS: ' + str(gpsLOS) + ' +/- '+str(S)
print 'INSAR: '+ str(-insarData[IDY][IDX]*1000.0)
try:
print 'Coherence: ' + str(Coh[IDY][IDX])
coherence.append(Coh[IDY][IDX])
if Coh[IDY][IDX]>thr:
InSAR1.append(-insarData[IDY][IDX]*1000.0)
GPS1.append(gpsLOS)
else:
InSAR2.append(-insarData[IDY][IDX]*1000.0)
GPS2.append(gpsLOS)
except:
print 'No information about the coherence is available!'
InSAR.append(-insarData[IDY][IDX]*1000.0)
GPS.append(gpsLOS)
GPS_station.append(st)
GPSx.append(IDX)
GPSy.append(IDY)
GPS_std.append(S)
if np.abs(gpsLOS+insarData[IDY][IDX]*1000.0) < GPS_InSAR_dif_thr:
GPSx1.append(IDX)
GPSy1.append(IDY)
else:
GPSx2.append(IDX)
GPSy2.append(IDY)
except:
NoInSAR='yes'
# print '######################################################################'
# print 'GPS:'
# print GPS
# print 'InSAR:'
# print InSAR
# print 'Coherence:'
# print coherence
# print 'Stations'
# print GPS_station
# print '######################################################################'
# ind0=InSAR.index(0)
InSAR=np.array(InSAR)
GPS=np.array(GPS)
GPS_std=np.array(GPS_std)
lt=len(InSAR)
# RMSE=np.sqrt((np.sum((InSAR-GPS)**2,0))/lt)
# SAD=np.sum(np.abs(InSAR-GPS),0)/np.sum(np.abs(InSAR))
SAD=np.sum(np.abs(InSAR-GPS),0)/lt
C1=np.zeros([2,len(InSAR)])
C1[0][:]=InSAR
C1[1][:]=GPS
Cor = np.corrcoef(C1)[0][1]
print '++++++++++++++++++++++++++++++++++++++++++++++'
print 'Comparison summary:'
print ''
print 'AAD (average absolute difference)= '+str(SAD) + ' [mm/yr]'
print 'Correlation = '+str(Cor)
print ''
print '++++++++++++++++++++++++++++++++++++++++++++++'
###############################################################
try:
minV
maxV
except:
minV=np.min([InSAR,GPS])
maxV=np.max([InSAR,GPS])
fig = plt.figure()
ax=fig.add_subplot(111)
# ax.errorbar(GPS,InSAR,yerr=1.0, xerr=1.0, fmt='o')
ax.errorbar(GPS,InSAR,yerr=0.0, xerr=uncertainty_fac*GPS_std, fmt='ko',ms=MarkerSize)
ax.plot([minV-3,maxV+3],[minV-3,maxV+3],'k--')
ax.set_ylabel('InSAR [mm/yr]',fontsize=26)
ax.set_xlabel('GPS LOS [mm/yr]',fontsize=26)
ax.set_ylim(minV-3,maxV+3)
ax.set_xlim(minV-3,maxV+3)
##
if annotation in ['yes','y','Y','Yes','YES']:
for i in range(len(GPS)) :
ax.annotate(GPS_station[i],xy=(GPS[i], InSAR[i]), xytext=(GPS[i]+ann_x, InSAR[i]+ann_y),color=annotation_Color)
majorLocator = MultipleLocator(5)
ax.yaxis.set_major_locator(majorLocator)
minorLocator = MultipleLocator(1)
ax.yaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_locator(minorLocator)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(26)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(26)
plt.tick_params(which='major', length=15,width=2)
plt.tick_params(which='minor', length=6,width=2)
figName = 'InSARvsGPS_errorbar.png'
# plt.savefig(figName,pad_inches=0.0)
plt.savefig(figName)
###############################################################
fig = plt.figure()
ax=fig.add_subplot(111)
ax.plot(GPS,InSAR, 'ko',ms=MarkerSize)
ax.plot([minV-3,maxV+3],[minV-3,maxV+3],'k--')
# ax.plot([-10,20],[-10,20],'k--')
ax.set_ylabel('InSAR [mm/yr]',fontsize=26)
ax.set_xlabel('GPS LOS [mm/yr]',fontsize=26)
ax.set_ylim(minV-3,maxV+3)
ax.set_xlim(minV-3,maxV+3)
# ax.set_ylim(-10,15)
# ax.set_xlim(-10,15)
if annotation in ['yes','y','Y','Yes','YES']:
for i in range(len(GPS)) :
ax.annotate(GPS_station[i],xy=(GPS[i], InSAR[i]), xytext=(GPS[i]+ann_x, InSAR[i]+ann_y),color=annotation_Color)
majorLocator = MultipleLocator(5)
ax.yaxis.set_major_locator(majorLocator)
minorLocator = MultipleLocator(1)
ax.yaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_locator(minorLocator)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(26)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(26)
plt.tick_params(which='major', length=15,width=2)
plt.tick_params(which='minor', length=6,width=2)
figName = 'InSARvsGPS.png'
# plt.savefig(figName,pad_inches=0.0)
plt.savefig(figName)
######################################################
try:
Coh
fig = plt.figure()
ax=fig.add_subplot(111)
ax.errorbar(GPS1,InSAR1,yerr=1.0, xerr=1.0, fmt='o')
ax.errorbar(GPS2,InSAR2,yerr=1.0, xerr=1.0, fmt='^')
ax.plot([minV-3,maxV+3],[minV-3,maxV+3],'--')
ax.set_ylabel('InSAR [mm/yr]',fontsize=26)
ax.set_xlabel('GPS LOS [mm/yr]',fontsize=26)
ax.set_ylim(minV-3,maxV+3)
ax.set_xlim(minV-3,maxV+3)
except:
print ''
# if disp_velocity=='yes':
# fig = plt.figure()
# ax=fig.add_subplot(111)
# im=ax.imshow(insarData)
# cbar = plt.colorbar(im)
# ax.plot(GPSx,GPSy,'k^',ms=10)
# ax.plot(IDXref,IDYref,'ks',ms=10)
# ax.plot(GPSx1,GPSy1,'gs',ms=10)
# ax.plot(GPSx2,GPSy2,'rs',ms=10)
plt.show()
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
FlyRanch/figurefirst | inkscape_extensions/0.x/tag_axis.py | 1 | 2468 | #!/usr/bin/env python
import sys
sys.path.append('/usr/share/inkscape/extensions') # or another path, as necessary
sys.path.append('/Applications/Inkscape.app/Contents/Resources/extensions')
sys.path.append('C:\Program Files\Inkscape\share\extensions')
#import xml.etree.ElementTree as ET
#ET.register_namespace('figurefirst', 'http://www.figurefirst.com')
# We will use the inkex module with the predefined Effect base class.
import inkex
# The simplestyle module provides functions for style parsing.
from simplestyle import *
class FigureFirstAxisTagEffect(inkex.Effect):
"""
Modified from example Inkscape effect extension. Tags object with axis tag.
"""
def __init__(self):
"""
Constructor.
Defines the "--name" option of a script.
"""
# Call the base class constructor.
inkex.Effect.__init__(self)
#import matplotlib
#Define string option "--name" with "-n" shortcut and default value "none".
self.OptionParser.add_option('-n', '--name', action = 'store',
type = 'string', dest = 'name', default = 'none',
help = 'Name axis')
inkex.NSS[u"figurefirst"] = u"http://flyranch.github.io/figurefirst/"
try:
inkex.etree.register_namespace("figurefirst","http://flyranch.github.io/figurefirst/")
except AttributeError:
#inkex.etree._NamespaceRegistry.update(inkex.addNS("name", "figurefirst"))
#This happens on windows version of inkscape - it might be good to check
#and see if the namespace has been correctly added to the document
pass
def effect(self):
"""
Effect behaviour.
Overrides the base class method: tags the selected SVG element with a figurefirst axis element carrying the supplied name.
"""
# Get the script's "--name" option value.
name = self.options.name
# Get access to the main SVG document element.
svg = self.document.getroot()
# Tag the single selected element with a figurefirst:axis child element.
if len(self.selected.values())>1:
raise Exception('too many items')
else:
el = self.selected.values()[0]
newElm = inkex.etree.Element(inkex.addNS("axis", "figurefirst"))
newElm.attrib[inkex.addNS("name", "figurefirst")] = name
#print inkex.NSS
el.append(newElm)
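# For reference, a sketch of the markup this produces (assuming a selection
# tagged with the name 'ax1'): a child element like
#
#   <figurefirst:axis figurefirst:name="ax1" />
#
# is appended to the selected element, which figurefirst later reads to map
# matplotlib axes onto the tagged SVG regions.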
# Create effect instance and apply it.
effect = FigureFirstAxisTagEffect()
effect.affect() | mit |
PYPIT/PYPIT | doc/comparisons/pypit_vs_lowredux.py | 1 | 15098 | # Module to compare PYPIT vs. LowRedux
# v0.1 -- First look [Kast_red only]
# v0.2 -- With improved trace
# v0.3 -- With improved skysub and airtovac 9 Nov 2015
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import sys, os
import yaml
import pdb
from scipy import stats
import matplotlib as mpl
mpl.rcParams['font.family'] = 'stixgeneral'
mpl.rc('xtick', labelsize=18)
mpl.rc('ytick', labelsize=18)
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import astropy.units as u
from astropy.io import fits
from astropy.table import Table
from linetools.spectra import io as lsio
from linetools.spectra.xspectrum1d import XSpectrum1D
sys.path.append('../src/')
import pypit
import arwave as arwv
try:
from xastropy.xutils import xdebug as xdb
except:
pass
def compare_s2n(pp,lrdx_sciobj,pypit_boxfile, iso):
'''Compare boxcar S/N
'''
# Read/Load
pypit_boxspec = lsio.readspec(pypit_boxfile)
# Read LowRedux
sig = np.sqrt(lrdx_sciobj['MASK_BOX']/(lrdx_sciobj['SIVAR_BOX'] + (lrdx_sciobj['MASK_BOX']==0)))
lwrdx_boxspec = XSpectrum1D.from_tuple( (lrdx_sciobj['WAVE_BOX'], lrdx_sciobj['FLUX_BOX'], sig) )
# Plot
plt.clf()
fig = plt.figure(figsize=(16,7))
fig.suptitle("Instr={:s}, Setup={:s} :: Boxcar S/N for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
ax = plt.gca()
ymax = np.median(pypit_boxspec.flux)*2.
# PYPIT
gdpy = pypit_boxspec.sig > 0.
pys2n = pypit_boxspec.flux[gdpy]/pypit_boxspec.sig[gdpy]
ax.plot(pypit_boxspec.dispersion[gdpy],pys2n, 'k-', drawstyle='steps', label='PYPIT')
# LowRedux
gdlx = lwrdx_boxspec.sig > 0.
ax.plot(lwrdx_boxspec.dispersion[gdlx], lwrdx_boxspec.flux[gdlx]/lwrdx_boxspec.sig[gdlx],
'-', color='blue', label='LowRedux')
# Axes
ax.set_xlim(np.min(pypit_boxspec.dispersion.value), np.max(pypit_boxspec.dispersion.value))
ax.set_ylim(0.,np.median(pys2n)*2.)
ax.set_xlabel('Wavelength',fontsize=17.)
ax.set_ylabel('S/N per pixel',fontsize=17.)
# Legend
legend = plt.legend(loc='upper right', borderpad=0.3,
handletextpad=0.3, fontsize='x-large')
# Finish
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1,rect=[0, 0.03, 1, 0.95])
pp.savefig(bbox_inches='tight')
plt.close()
#
def compare_boxcar(pp,lrdx_sciobj,pypit_boxfile, iso):
'''Compare boxcar extractions
'''
# Read/Load
pypit_boxspec = lsio.readspec(pypit_boxfile)
# Read LowRedux
sig = np.sqrt(lrdx_sciobj['MASK_BOX']/(lrdx_sciobj['SIVAR_BOX'] + (lrdx_sciobj['MASK_BOX']==0)))
lwrdx_boxspec = XSpectrum1D.from_tuple( (lrdx_sciobj['WAVE_BOX'], lrdx_sciobj['FLUX_BOX'], sig) )
# Plot
plt.clf()
fig = plt.figure(figsize=(16,11))
gs = gridspec.GridSpec(2, 1)
fig.suptitle("Instr={:s}, Setup={:s} :: Boxcar Extractions for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
for qq in range(2):
ax = plt.subplot(gs[qq])
if qq == 0:
xlim = None
else:
xlim = (6700,7000)
ymax = np.median(pypit_boxspec.flux)*2.
# PYPIT
ax.plot(pypit_boxspec.dispersion, pypit_boxspec.flux, 'k-', drawstyle='steps',label='PYPIT')
ax.plot(pypit_boxspec.dispersion, pypit_boxspec.sig, 'g-', drawstyle='steps')
# LowRedux
ax.plot(lwrdx_boxspec.dispersion, lwrdx_boxspec.flux, '-', color='blue',label='LowRedux')
ax.plot(lwrdx_boxspec.dispersion, lwrdx_boxspec.sig, '-', color='gray')
# Axes
if xlim is None:
ax.set_xlim(np.min(pypit_boxspec.dispersion.value), np.max(pypit_boxspec.dispersion.value))
else:
ax.set_xlim(xlim)
ax.set_ylim(0.,ymax)
ax.set_xlabel('Wavelength',fontsize=19.)
ax.set_ylabel('electrons',fontsize=19.)
# Legend
legend = plt.legend(loc='upper right', borderpad=0.3,
handletextpad=0.3, fontsize='x-large')
# Finish
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1,rect=[0, 0.03, 1, 0.95])
pp.savefig(bbox_inches='tight')
plt.close()
#
def compare_chi2hist(pp,lrdx_scihdu,pypit_skyfile, pypit_skysubfile, pypit_varfile, pypit_objtrcfile, iso):
'''Compare chi^2 histograms for skysub regions
'''
# Load PYPIT
pypit_sky = fits.open(pypit_skyfile)[0].data
pypit_skysub = fits.open(pypit_skysubfile)[0].data
pypit_var = fits.open(pypit_varfile)[0].data
pypit_resid = pypit_skysub / np.sqrt(pypit_var)
pypit_objtrc = fits.open(pypit_objtrcfile)[0].data
pypit_objmsk = fits.open(pypit_objtrcfile)[1].data.astype(int)
# LowRedux
lwrdx_proc = lrdx_scihdu[0].data
lwrdx_ivar = lrdx_scihdu[1].data
lwrdx_sky = lrdx_scihdu[2].data
lwrdx_resid = (lwrdx_proc-lwrdx_sky) * np.sqrt(lwrdx_ivar)
# Select regions
# Median size of box car
dx = np.median(np.sum(pypit_objmsk[:,:,0],axis=1))
#
pypit_skymask = np.zeros_like(pypit_sky)
# Generate sky regions near the object trace
for ii in xrange(pypit_sky.shape[0]):
# Left
idx = np.arange(int(pypit_objtrc[ii,0]-dx*3), int(pypit_objtrc[ii,0]-dx))
pypit_skymask[ii,idx] = 1
# Right
idx = np.arange(int(pypit_objtrc[ii,0]+dx), int(pypit_objtrc[ii,0]+dx*3))
pypit_skymask[ii,idx] = 1
#
skypix = np.where(pypit_skymask==1)
# Residuals
# PYPIT
pypit_chi = pypit_resid[skypix]
# LowRedux
lwrdx_chi = lwrdx_resid[skypix]
# Histograms
# Boundaries
minv, maxv = -7., 7.
binsz = 0.1
# Set the boundaries sensibly given binsz
i0 = int( minv / binsz) - 1
i1 = int( maxv / binsz) + 1
rng = tuple( binsz*np.array([i0,i1]) )
nbin = i1-i0
# Plot
plt.clf()
fig = plt.figure(figsize=(16,7))
fig.suptitle("Instr={:s}, Setup={:s} :: Chi_Resid Histograms for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
gs = gridspec.GridSpec(1, 2)
# PYPIT
axp = plt.subplot(gs[0])
# Histogram
hist, edges = np.histogram(pypit_chi, range=rng, bins=nbin)
axp.bar(edges[:-1], hist, width=binsz, color='black')#, alpha=kwargs['alpha'])
# PDF for Gaussian
area = pypit_chi.size * binsz
xppf = np.linspace(stats.norm.ppf(0.0001), stats.norm.ppf(0.9999), 100)
yppf = area*stats.norm.pdf(xppf)
axp.plot(xppf, yppf, 'r-', alpha=1.0)
# Median
axp.plot([np.median(pypit_chi)]*2, [-9e9,9e9], 'g--')
#
axp.set_xlim(minv,maxv)
axp.set_ylim(0., np.max(yppf)*1.1)
axp.set_xlabel(r'$\chi$ (PYPIT)', fontsize=17)
# LowRedux
axl = plt.subplot(gs[1])
# Histogram
hist, edges = np.histogram(lwrdx_chi, range=rng, bins=nbin)
axl.bar(edges[:-1], hist, width=binsz)#, alpha=kwargs['alpha'])
# PDF for Gaussian
area = lwrdx_chi.size * binsz
yppf = area*stats.norm.pdf(xppf)
axl.plot(xppf, yppf, 'r-', alpha=1.0)
# Median
axl.plot([np.median(lwrdx_chi)]*2, [-9e9,9e9], 'g--')
#
axl.set_xlim(minv,maxv)
axl.set_ylim(0., np.max(yppf)*1.1)
axl.set_xlabel(r'$\chi$ (LowRedux)', fontsize=17)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1,rect=[0, 0.03, 1, 0.95])
pp.savefig(bbox_inches='tight')
plt.close()
def compare_chi2img(pp,lrdx_scihdu,pypit_skysubfile, pypit_varfile, iso):
'''Compare chi^2 images for skysub
'''
# Load PYPIT
pypit_skysub = fits.open(pypit_skysubfile)[0].data
pypit_var = fits.open(pypit_varfile)[0].data
pypit_resid = pypit_skysub / np.sqrt(pypit_var)
# LowRedux
lwrdx_proc = lrdx_scihdu[0].data
lwrdx_ivar = lrdx_scihdu[1].data
lwrdx_sky = lrdx_scihdu[2].data
lwrdx_resid = (lwrdx_proc-lwrdx_sky) * np.sqrt(lwrdx_ivar)
# Plot
vmnx = (-3.,3.)
plt.clf()
fig = plt.figure(figsize=(16,8))
fig.suptitle("Instr={:s}, Setup={:s} :: Chi_Resid Images for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
cm = plt.get_cmap('Greys')
wbox= {'facecolor':'white', 'edgecolor':'white'}
gs = gridspec.GridSpec(2, 20)
# PYPIT
ax = plt.subplot(gs[0,:-1])
mplt = ax.imshow(pypit_resid.T, origin='lower', cmap=cm)
mplt.set_clim(vmin=vmnx[0], vmax=vmnx[1])
ax.text(0.10, 0.80, 'PYPIT', transform=ax.transAxes, fontsize=19, ha='left',bbox=wbox, color='black')
# LowRedux
ax = plt.subplot(gs[1,:-1])
mplt2 = ax.imshow(lwrdx_resid.T, origin='lower', cmap=cm)
mplt2.set_clim(vmin=vmnx[0], vmax=vmnx[1])
ax.text(0.10, 0.80, 'LowRedux', transform=ax.transAxes, fontsize=19, ha='left',bbox=wbox, color='blue')
# Colorbar
cbar_ax = plt.subplot(gs[:,-1])
cb = plt.colorbar(mplt2, cax=cbar_ax)#,orientation='horizontal',)
cb.set_label(r'$\chi$',fontsize=18.)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)
pp.savefig(bbox_inches='tight')
plt.close()
def compare_skyspec(pp,sciobj,pypit_skyboxfile, iso):
    '''Compare sky spectra
Parameters:
----------
pp: Pdf obj
sciobj: Table
LowRedux sciobj table
iso: tuple
instr, setup, obj strings
'''
# Load
pypit_skyspec = lsio.readspec(pypit_skyboxfile)
#
lwrdx_skywv = sciobj['WAVE_BOX']
lwrdx_skyfx = sciobj['SKY_BOX']
lwrdx_skywv = arwv.vactoair(lwrdx_skywv*u.AA)
#
plt.clf()
plt.figure(figsize=(16,8))
gs = gridspec.GridSpec(2, 1)
# Full
ax = plt.subplot(gs[0])
# PYPIT
ax.plot(pypit_skyspec.dispersion, pypit_skyspec.flux, 'k-', label='PYPIT', drawstyle='steps')
# LowRedux
ax.plot(lwrdx_skywv, lwrdx_skyfx/(2.*sciobj['BOX_RAD']), '-', color='blue', label='LowRedux')
# Axes
ax.set_xlim(np.min(pypit_skyspec.dispersion.value),np.max(pypit_skyspec.dispersion.value))
ax.set_ylim(0.,np.max(pypit_skyspec.flux))
ax.set_xlabel('Wavelength',fontsize=17.)
ax.set_ylabel('Sky (Counts/pix)',fontsize=17.)
# Legend
legend = plt.legend(loc='upper left', borderpad=0.3,
handletextpad=0.3, fontsize='x-large')
# ZOOM
axz = plt.subplot(gs[1])
# PYPIT
axz.plot(pypit_skyspec.dispersion, pypit_skyspec.flux, 'k-', label='PYPIT', drawstyle='steps-mid')
# LowRedux
axz.plot(lwrdx_skywv, lwrdx_skyfx/(2.*sciobj['BOX_RAD']), '-', color='blue', label='LowRedux')
# Axes
zlim = np.array([7200., 7700.])*u.AA
axz.set_xlim(zlim.value)
ymx = np.max( pypit_skyspec.flux[np.where((pypit_skyspec.dispersion>zlim[0])&(pypit_skyspec.dispersion<zlim[1]))])
axz.set_ylim(0.,ymx)
axz.set_xlabel('Wavelength',fontsize=17.)
axz.set_ylabel('Sky (Counts/pix)',fontsize=17.)
# Legend
legend = plt.legend(loc='upper right', borderpad=0.3,
handletextpad=0.3, fontsize='x-large')
ax.set_title("Instr={:s}, Setup={:s} :: Sky Spectra for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
# Finish
plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)
pp.savefig(bbox_inches='tight')
plt.close()
#
def compare_traces(pp,sciobj,pypit_objtrcfile, iso):
'''Compare traces
Parameters:
----------
pp: Pdf obj
sciobj: Table
LowRedux sciobj table
iso: tuple
instr, setup, obj strings
'''
# Read PYPIT
pypit_objtrc = fits.open(pypit_objtrcfile)[0].data
if pypit_objtrc.shape[1] > 1:
raise ValueError('Not ready for multiple objects')
# Read LowRedux
lwrdx_objtrc = sciobj['XPOS']
plt.clf()
plt.figure(figsize=(16,8))
ax = plt.gca()
# PYPIT
ax.plot(pypit_objtrc[:,0], 'k-', drawstyle='steps', label='PYPIT')
# LowRedux
ax.plot(lwrdx_objtrc, '-', color='blue', label='LowRedux')
# Axes
#ax.set_ylim(0.,np.median(pys2n)*2.)
ax.set_xlabel('Row',fontsize=17.)
ax.set_ylabel('Column',fontsize=17.)
ax.set_title("Instr={:s}, Setup={:s} :: Object Traces for {:s} :: PYPIT ({:s})".format(iso[0], iso[1], iso[2], pypit.version), fontsize=18.)
# Legend
legend = plt.legend(loc='lower right', borderpad=0.3,
handletextpad=0.3, fontsize='x-large')
# Finish
pp.savefig(bbox_inches='tight')
plt.close()
def main():
'''Loop through TEST_SUITES and perform comparison where applicable
'''
pypit_roots = dict(box='boxcar',sky='sky',skysub='skysub',var='var',skybox='skybox',objtrc='objtrc')
# Point to all sub-folders
raw_path = os.getenv('DROPBOX_DIR')+'PYPIT/TEST_SUITES/'
walk = os.walk(raw_path)
instruments = next(walk)[1]
# Loop on instruments
for instr in instruments:
# Setups
setups = next(os.walk(raw_path+instr))[1]
for setup in setups:
wdir = os.path.join(os.getenv('TST_PYPIT'),instr,setup)
# Look for LowRedux and MasterFrame folders
low_rdx_path = os.path.join(raw_path,instr,setup,'LowRedux')
yml_fil = low_rdx_path+'/objects.yaml'
pypit_path = os.path.join(os.getenv('TST_PYPIT'),instr,setup,'MasterFrames')
if (os.path.exists(low_rdx_path) & os.path.exists(pypit_path) & os.path.isfile(yml_fil)):
with open(yml_fil, 'r') as infile:
                    lrdx_dict = yaml.safe_load(infile)
else:
print('No LowRedux for instr={:s}, setup={:s}'.format(instr,setup))
continue
# Loop on sources
for obj in lrdx_dict.keys():
iso = (instr, setup, obj)
# Set LowRedux files
lrdx_scifile = os.path.join(low_rdx_path,lrdx_dict[obj]['sci_file'])
lrdx_scihdu = fits.open(lrdx_scifile)
lrdx_sciobj = Table(lrdx_scihdu[5].data)[0]
lrdx_wvfil = lrdx_dict[obj]['wave_file']
# Setup for PYPIT files
pypit_prefix = pypit_path+'/'+obj+'_'
pypit_wvimg_fil = pypit_path+'/mswvimg_red_000.fits'
# Outfil
outfil = 'PYPIT_vs_LRDX_'+instr+'_'+setup+'.pdf'
pp = PdfPages(outfil)
# Trace test
compare_traces(pp,lrdx_sciobj,pypit_prefix+pypit_roots['objtrc']+'.fits', iso)
# Sky
compare_skyspec(pp,lrdx_sciobj,pypit_prefix+pypit_roots['skybox']+'.fits', iso)
compare_chi2img(pp,lrdx_scihdu,pypit_prefix+pypit_roots['skysub']+'.fits', pypit_prefix+pypit_roots['var']+'.fits', iso)
compare_chi2hist(pp,lrdx_scihdu,pypit_prefix+pypit_roots['sky']+'.fits', pypit_prefix+pypit_roots['skysub']+'.fits', pypit_prefix+pypit_roots['var']+'.fits', pypit_prefix+pypit_roots['objtrc']+'.fits', iso)
# Spectra
compare_boxcar(pp,lrdx_sciobj,pypit_prefix+pypit_roots['box']+'.fits', iso)
compare_s2n(pp,lrdx_sciobj,pypit_prefix+pypit_roots['box']+'.fits', iso)
# Finish
pp.close()
# ################
if __name__ == "__main__":
main()
| gpl-3.0 |
alkyl1978/gnuradio | gr-filter/examples/resampler.py | 58 | 4454 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import filter
from gnuradio import blocks
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class mytb(gr.top_block):
def __init__(self, fs_in, fs_out, fc, N=10000):
gr.top_block.__init__(self)
rerate = float(fs_out) / float(fs_in)
print "Resampling from %f to %f by %f " %(fs_in, fs_out, rerate)
# Creating our own taps
taps = filter.firdes.low_pass_2(32, 32, 0.25, 0.1, 80)
self.src = analog.sig_source_c(fs_in, analog.GR_SIN_WAVE, fc, 1)
#self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, N)
# A resampler with our taps
self.resamp_0 = filter.pfb.arb_resampler_ccf(rerate, taps,
flt_size=32)
# A resampler that just needs a resampling rate.
# Filter is created for us and designed to cover
# entire bandwidth of the input signal.
# An optional atten=XX rate can be used here to
# specify the out-of-band rejection (default=80).
self.resamp_1 = filter.pfb.arb_resampler_ccf(rerate)
self.snk_in = blocks.vector_sink_c()
self.snk_0 = blocks.vector_sink_c()
self.snk_1 = blocks.vector_sink_c()
self.connect(self.src, self.head, self.snk_in)
self.connect(self.head, self.resamp_0, self.snk_0)
self.connect(self.head, self.resamp_1, self.snk_1)
def main():
fs_in = 8000
fs_out = 20000
fc = 1000
N = 10000
tb = mytb(fs_in, fs_out, fc, N)
tb.run()
# Plot PSD of signals
nfftsize = 2048
fig1 = pylab.figure(1, figsize=(10,10), facecolor="w")
sp1 = fig1.add_subplot(2,1,1)
sp1.psd(tb.snk_in.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_in)
sp1.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp1.set_xlim([-fs_in/2, fs_in/2])
sp2 = fig1.add_subplot(2,1,2)
sp2.psd(tb.snk_0.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With our filter")
sp2.psd(tb.snk_1.data(), NFFT=nfftsize,
noverlap=nfftsize/4, Fs = fs_out,
label="With auto-generated filter")
sp2.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
sp2.set_xlim([-fs_out/2, fs_out/2])
sp2.legend()
# Plot signals in time
Ts_in = 1.0/fs_in
Ts_out = 1.0/fs_out
t_in = scipy.arange(0, len(tb.snk_in.data())*Ts_in, Ts_in)
t_out = scipy.arange(0, len(tb.snk_0.data())*Ts_out, Ts_out)
fig2 = pylab.figure(2, figsize=(10,10), facecolor="w")
sp21 = fig2.add_subplot(2,1,1)
sp21.plot(t_in, tb.snk_in.data())
sp21.set_title(("Input Signal at f_s=%.2f kHz" % (fs_in/1000.0)))
sp21.set_xlim([t_in[100], t_in[200]])
sp22 = fig2.add_subplot(2,1,2)
sp22.plot(t_out, tb.snk_0.data(),
label="With our filter")
sp22.plot(t_out, tb.snk_1.data(),
label="With auto-generated filter")
sp22.set_title(("Output Signals at f_s=%.2f kHz" % (fs_out/1000.0)))
r = float(fs_out)/float(fs_in)
    sp22.set_xlim([t_out[int(r * 100)], t_out[int(r * 200)]])
sp22.legend()
pylab.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
trankmichael/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
KellyChan/python-examples | python/data_science_de/Q5-SEM-RF.py | 3 | 2013 | """
Project: Data Science
Subject: Machine Learning - SEM
Author: Kelly Chan
Date: May 10 2014
"""
tabPath = "path/outputs/sem/tables/"
import numpy as np
import pylab as pl
import pandas as pd
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestRegressor
def loadData(datafile):
return pd.read_csv(datafile)
def splitData(data, rate):
index = int(len(data) * rate)
train = data.iloc[:index]
test = data.iloc[index:]
return train, test
def createRFRegression():
rf = RandomForestRegressor(n_estimators=20)
return rf
def predictRF(rf, train, test, cols, target):
rf.fit(train[cols], train[target])
return rf.predict(test[cols])
def evaluateRF(rf, test, cols, target):
r2 = r2_score(test[target], rf.predict(test[cols]))
mse = np.mean((test[target] - rf.predict(test[cols]))**2)
return r2, mse
def plotRF(rf, test, cols, target, r2):
pl.scatter(test[target], rf.predict(test[cols]))
pl.plot(np.arange(8, 15), np.arange(8, 15), label="r^2=" + str(r2), c="r")
pl.legend(loc="lower right")
pl.title("RandomForest Regression: %s" % target)
pl.show()
def predict(train, test, target, cols):
rf = createRFRegression()
yhat = predictRF(rf, train, test, cols, target)
r2, mse = evaluateRF(rf, test, cols, target)
plotRF(rf, test, cols, target, r2)
return yhat, r2, mse
def main():
data = loadData(tabPath + "clean_data.csv")
train, test = splitData(data, 0.8)
target = 'Order Value'
cols = ['Campaign Clicks', \
'Visitors', 'New Visitors', \
'Bounces', 'Entry Rate %', 'Exit Rate %', \
'OP Visits w Catalogue %', \
'OP Visits w Search %', \
'OP Visits w Product %', \
'OP Visits w step Cart %', \
'Campaign Lifecycle Contacts']
#train = data
#test = data
yhat, r2, mse = predict(train, test, target, cols)
    print(yhat, r2, mse)
if __name__ == '__main__':
main()
| mit |
subutai/nupic.research | nupic/research/frameworks/wandb/ray_wandb.py | 2 | 11410 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import ast
import json
import numbers
import os
import warnings
from copy import deepcopy
import wandb
from ray import tune
from ray.tune.utils import flatten_dict
__all__ = [
"log",
"WandbLogger",
"prep_plot_for_wandb",
]
# Find the directory where wandb saves its results.
if "WANDB_DIR" in os.environ:
WANDB_DIR = os.path.join(os.environ["WANDB_DIR"], "wandb")
else:
WANDB_DIR = None
CONFIG_NAME = "ray_wandb_config.json"
def log(log_dict, commit=False, step=None, sync=True, *args, **kwargs):
"""
This logs its arguments to wandb only if a run has been initialized.
"""
if wandb.run:
wandb.log(log_dict, commit=commit, step=step, sync=sync, *args, **kwargs)
class WandbLogger(tune.logger.Logger):
"""
This forks the wandb 0.9.7 ray WandbLogger to
1) make resuming experiments easier
2) include more supported data types
3) support logging time-series formatted results
As a `ray.tune.logger.Logger` this class will process all results returned from
training and automatically sync them to wandb.
To use this class, include it in your `tune.run` config under `loggers` and add
`env_config["wandb"]` to specify wandb params.
The main options include
wandb:
- name: Chosen name of run/experiment
- project: Name of wandb project to group all related runs
- group: Extra layer of naming to group runs under a project
- notes: A multi-line string associated with the run
All are optional, but name, project, and notes are recommended. For all wandb init
params, see https://docs.wandb.com/library/init.
Example usage:
```
# Be sure to set `WANDB_API_KEY` in environment variables.
from ray.tune.logger import DEFAULT_LOGGERS
tune.run(
        MyTrainable,
        loggers=list(DEFAULT_LOGGERS) + [WandbLogger],
        config={
            "env_config": {
                "wandb": {
                    "project": "my-project-name",
                    "name": "my-exp-name",
                    "group": "group-of-runs",
                    "notes": "This experiment aims to ..."
},
# Optional
"result_to_time_series_fn":
MyExperiment.expand_result_to_time_series,
}
}
)
```
The "result_to_time_series_fn" is a function that takes a result and config
and returns a dictionary of {timestep: result}. If you provide this
function, you convert from an epoch-based time series to your own
timestep-based time series, logging multiple timesteps for each epoch.
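    A minimal sketch of such a function (hypothetical names), in the spirit of
    the `MyExperiment.expand_result_to_time_series` hook mentioned above:
    ```
    def expand_result_to_time_series(result, config):
        steps_per_epoch = config.get("steps_per_epoch", 1)
        start = result["training_iteration"] * steps_per_epoch
        return {start + i: {"mean_loss": result["mean_loss"]}
                for i in range(steps_per_epoch)}
    ```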
"""
# Only the following types are able to be logged through this class.
# See https://docs.wandb.com/library/log for all wandb data-types.
accepted_types = (
numbers.Number,
wandb.data_types.WBValue, # Base class for all wandb values
)
def _init(self):
"""
This function runs `wandb.init` with two key extra steps:
1) `group` is automatically assigned to the date-time if not already given
2) The config passed to `wandb.init` is saved. This allows `log` (from this
           module) to make an identical call to `wandb.init`. While the former init
           gets called outside of the ray process, the latter typically does not.
           Thus, by saving the wandb config, we can associate calls to `log` with the
           same `group` associated with this logger.
"""
self._config = None
# Auto format the group to be the name of the trial.
env_config = self.config["env_config"]
wandb_config = env_config["wandb"]
# Find latest run config upon resume.
resume = wandb_config.get("resume", False)
if resume and "id" not in wandb_config:
enable_run_resume(wandb_config)
# This will create a new run-directory.
wandb.init(**self.config.get("env_config", {}).get("wandb", {}))
# Get result_to_time_series_fn.
experiment_class = self.config.get("experiment_class", None)
self.result_to_time_series_fn = None
if "result_to_time_series_fn" in env_config:
self.result_to_time_series_fn = env_config["result_to_time_series_fn"]
elif hasattr(experiment_class, "expand_result_to_time_series"):
self.result_to_time_series_fn = (
experiment_class.expand_result_to_time_series
)
def on_result(self, result):
"""
The following is copied from the parent class; however, non-serializable
        config values are saved as their reprs so that they are all yaml
serializable. See for details:
- https://github.com/wandb/client/issues/586
"""
config = deepcopy(result.get("config"))
if config and self._config is None:
for k in config.keys():
if wandb.config.get(k) is None:
s = repr(config[k])
try:
ast.literal_eval(s)
wandb.config[k] = config[k]
except (ValueError, SyntaxError):
# Non-serializable
wandb.config[k] = s
self._config = config
tmp = result.copy()
for k in ["done", "config", "pid", "timestamp"]:
if k in tmp:
del tmp[k]
if self.result_to_time_series_fn is not None:
assert self._config is not None
time_series_dict = self.result_to_time_series_fn(tmp, self._config)
for t, d in sorted(time_series_dict.items(), key=lambda x: x[0]):
metrics = {}
for key, value in flatten_dict(d, delimiter="/").items():
if not isinstance(value, self.accepted_types):
continue
metrics[key] = value
wandb.log(metrics, step=t)
else:
metrics = {}
for key, value in flatten_dict(tmp, delimiter="/").items():
if not isinstance(value, self.accepted_types):
continue
metrics[key] = value
wandb.log(metrics)
def close(self):
wandb.join()
class PrepPlotForWandb:
"""
    This mixin ensures all plots can be logged to wandb without error. Ray typically
    tries to deepcopy the results dict, which fails because matplotlib figures do not
    support deepcopy. This is avoided by first wrapping the plots with wandb.Image
    before sending them to Ray, which then logs them through the WandbLogger.
"""
def run_epoch(self):
"""Wrap plots with wandb.Image"""
results = super().run_epoch()
wandb_plots = {}
for name, value in results.items():
if is_matplotlib_plot(value):
wandb_plots[name] = wandb.Image(value)
results.update(wandb_plots)
return results
@classmethod
def get_execution_order(cls):
eo = super().get_execution_order()
eo["run_epoch"].append(
"PrepPlotForWandb: Wrap plots with wandb.Image")
return eo
# ---------
# Utils
# ---------
def is_matplotlib_plot(value):
typename = wandb.util.get_full_typename(value)
return wandb.util.is_matplotlib_typename(typename)
def prep_plot_for_wandb(plot_func):
"""
    This wraps a plotting function to alter its return value to be of type wandb.Image.
This way, the plot can be logged through ray, specifically the ray WandbLogger,
without error. Ray typically tries to deepcopy all logged objects; however, plots
cannot be deepcopied.
:param plot_func: callable with arbitrary arguments that returns a matplotlib
figure, axes object, or anything related.
"""
def plot_and_make_wandb_image(*args, **kwargs):
plot = plot_func(*args, **kwargs)
if is_matplotlib_plot(plot):
plot = wandb.Image(plot)
else:
warnings.warn(f"Unable to convert object of type {type(plot)}"
" to `wandb.Image`.")
return plot
return plot_and_make_wandb_image
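# A minimal usage sketch (the plotting function below is hypothetical, not part
# of this module): decorating a figure-producing function so that what lands in
# a Ray results dict is already a wandb.Image and survives Ray's deepcopy.
#
#     @prep_plot_for_wandb
#     def plot_loss_curve(losses):
#         import matplotlib.pyplot as plt
#         fig, ax = plt.subplots()
#         ax.plot(losses)
#         return fig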
def enable_run_resume(wandb_config):
"""
Finds and sets latest wandb run id to resume the corresponding run.
"""
name = wandb_config.get("name", None)
run_id = wandb_config.get("id", None)
if name and not run_id:
run_id = get_latest_run_id(name=name) or None
if run_id is None:
warnings.warn(
"Couldn't find latest wandb run-id to resume."
"Ensure `WANDB_DIR` environment variable is set.")
wandb_config.update(id=run_id, resume=True)
def get_latest_run_id(name=None):
"""
Gets the config of the latest wandb run.
:param name: (optional) name of run; filters runs so they must match the name given
"""
latest_run_dir = get_latest_run_dir(name=name)
if latest_run_dir is None:
return None
run_id = latest_run_dir.split("-")[-1] or None # None if empty string
return run_id
def get_latest_run_dir(name=None):
"""
Gets the directory of where the latest wandb run is saved.
:param name: (optional) name of run; filters runs so they must match the name given
"""
if WANDB_DIR is None:
return None
all_subdirs = []
for d in os.listdir(WANDB_DIR):
# Make sure run directory exists.
d_full = os.path.join(WANDB_DIR, d)
if not os.path.isdir(d_full):
continue
# Validate name of run when specified.
run_metadata_path = os.path.join(d_full, "wandb-metadata.json")
if name and os.path.isfile(run_metadata_path):
with open(run_metadata_path, "r") as f:
try:
run_metadata = json.load(f)
except json.JSONDecodeError:
run_metadata = {}
d_name = run_metadata.get("name", False)
if d_name and d_name == name:
all_subdirs.append(d_full)
# If name is not given, add to list of run directories by default.
elif name is None:
all_subdirs.append(d_full)
# Find latest run directory chronologically.
latest_run_dir = max(all_subdirs, key=os.path.getmtime)
return latest_run_dir
| agpl-3.0 |
matthewalbani/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
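# Each integer k receives the truncated-normal probability mass of its bin:
#   P(X = k) = F((k + 0.5) * nbound / npointsh) - F((k - 0.5) * nbound / npointsh)
# where F is the cdf of the standard normal truncated at +/- normbound.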
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
shikhardb/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 78 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
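Concretely, both models fit x = W h + mu + noise with a low-rank W and differ
only in the noise covariance: probabilistic PCA assumes isotropic noise
sigma^2 * I (homoscedastic), while Factor Analysis allows a diagonal Psi
(heteroscedastic); this is exactly the difference probed by the two simulated
noise settings below.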
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
henridwyer/scikit-learn | sklearn/utils/tests/test_utils.py | 215 | 8100 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from itertools import chain
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest, assert_raises_regex)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils import gen_even_slices
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
# Check the check_random_state utility function behavior
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
# Border case not worth mentioning in doctests
assert_true(resample() is None)
def test_deprecated():
# Test whether the deprecated decorator issues appropriate warnings
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
# Check that invalid arguments yield ValueError
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
# fun with read-only data in dataframes
# this happens in joblib memmapping
X.setflags(write=False)
X_df_readonly = pd.DataFrame(X)
with warnings.catch_warnings(record=True):
X_df_ro_indexed = safe_indexing(X_df_readonly, inds)
assert_array_equal(np.array(X_df_ro_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
def test_shuffle_dont_convert_to_array():
    # Check that shuffle does not try to convert to numpy arrays with float
    # dtypes and can let any indexable datastructure pass through.
a = ['a', 'b', 'c']
b = np.array(['a', 'b', 'c'], dtype=object)
c = [1, 2, 3]
d = MockDataFrame(np.array([['a', 0],
['b', 1],
['c', 2]],
dtype=object))
e = sp.csc_matrix(np.arange(6).reshape(3, 2))
a_s, b_s, c_s, d_s, e_s = shuffle(a, b, c, d, e, random_state=0)
assert_equal(a_s, ['c', 'b', 'a'])
assert_equal(type(a_s), list)
assert_array_equal(b_s, ['c', 'b', 'a'])
assert_equal(b_s.dtype, object)
assert_equal(c_s, [3, 2, 1])
assert_equal(type(c_s), list)
assert_array_equal(d_s, np.array([['c', 2],
['b', 1],
['a', 0]],
dtype=object))
assert_equal(type(d_s), MockDataFrame)
assert_array_equal(e_s.toarray(), np.array([[4, 5],
[2, 3],
[0, 1]]))
def test_gen_even_slices():
# check that gen_even_slices contains all samples
some_range = range(10)
joined_range = list(chain(*[some_range[slice] for slice in gen_even_slices(10, 3)]))
assert_array_equal(some_range, joined_range)
# check that passing negative n_chunks raises an error
slices = gen_even_slices(10, -1)
assert_raises_regex(ValueError, "gen_even_slices got n_packs=-1, must be"
" >=1", next, slices)
| bsd-3-clause |
maheshakya/scikit-learn | examples/svm/plot_rbf_parameters.py | 26 | 4273 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters `gamma`
and `C` of the rbf kernel SVM.
Intuitively, the `gamma` parameter defines how far the influence
of a single training example reaches, with low values meaning 'far'
and high values meaning 'close'.
The `C` parameter trades off misclassification of training examples
against simplicity of the decision surface. A low C makes
the decision surface smooth, while a high C aims at classifying
all training examples correctly.
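For reference, the RBF kernel is K(x, x') = exp(-gamma * ||x - x'||^2), so a
larger `gamma` shrinks each training example's radius of influence, while `C`
weights the penalty on margin violations in the SVM objective.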
Two plots are generated. The first is a visualization of the
decision function for a variety of parameter values, and the second
is a heatmap of the classifier's cross-validation accuracy as
a function of `C` and `gamma`. For this example we explore a relatively
large grid for illustration purposes. In practice, a logarithmic
grid from `10**-3` to `10**3` is usually sufficient.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
Y = iris.target
# dataset for decision function visualization
X_2d = X[:, :2]
X_2d = X_2d[Y > 0]
Y_2d = Y[Y > 0]
Y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifier
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = 10.0 ** np.arange(-2, 9)
gamma_range = 10.0 ** np.arange(-5, 4)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedKFold(y=Y, n_folds=3)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, Y)
print("The best classifier is: ", grid.best_estimator_)
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1, 1e2, 1e4]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, Y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma 10^%d, C 10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.jet)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=Y_2d, cmap=plt.cm.jet)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
score_dict = grid.grid_scores_
# We extract just the scores
scores = [x[1] for x in score_dict]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# draw heatmap of accuracy as a function of gamma and C
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=0.05, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.spectral)
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.show()
| bsd-3-clause |
memorydump85/cardamon | mintplot/mint_plot.py | 1 | 6689 | import sys
import pandas as pd
import numpy as np
import locale
import datetime as dt
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import patches
from matplotlib.dates import MONDAY
from matplotlib.dates import WeekdayLocator, DateFormatter
#
# Style settings
#
plt.style.use('ggplot')
mpl.rcParams['font.family'] = 'Ubuntu'
mpl.rcParams['font.size'] = 10.0
mpl.rcParams['axes.titlesize'] = 'medium'
#
# Category group definitions
#
all_categories = {
'Arts', 'Mobile Phone', 'Entertainment', 'Transfer', 'Fast Food',
'Pharmacy', 'Credit Card Payment', 'Income', 'Federal Tax',
'Uncategorized', 'Gas & Fuel', 'Rental Car & Taxi', 'Health & Fitness',
'Home Improvement', 'Doctor', 'Utilities', 'Cash & ATM',
'Electronics & Software', 'Paycheck', 'Furnishings', 'Gift',
'Coffee Shops', 'Parking', 'Shipping', 'Food & Dining', 'Service & Parts',
'Auto Payment', 'Shopping', 'Laundry', 'Investments', 'Restaurants',
'Movies & DVDs', 'Financial', 'Mortgage & Rent', 'Business Services',
'Hair', 'Groceries', 'Toys', 'Home', 'Clothing', 'Amusement'
}
food_categories = {
'Fast Food', 'Coffee Shops', 'Food & Dining', 'Restaurants', 'Groceries'
}
living_categories = {
'Home Improvement', 'Electronics & Software', 'Furnishings', 'Gift',
'Shopping', 'Toys', 'Clothing', 'Hair'
}
entertainment_categories = {
'Entertainment', 'Movies & DVDs', 'Amusement'
}
transport_categories = {
'Gas & Fuel', 'Rental Car & Taxi', 'Parking', 'Auto Payment'
}
def load_csv_data(csv_file, category_filter=None):
"""
Load csv data and adjust amounts to reflect debit/credit status
debit => positive
credit => negative
"""
df = pd.read_csv(csv_file, parse_dates=[0])
if category_filter is not None:
df = df[df.Category.isin(category_filter)]
df.index = range(len(df))
# Debit transactions only
amounts = df.Amount.values
weights = [ 1. if v=='debit' else -1. for v in df['Transaction Type'].values ]
adjusted = pd.Series([a*w for a, w in zip(amounts, weights)], index=df.index)
df['Amount'] = adjusted
return df
def main():
csv_file = '/home/rpradeep/Downloads/transactions.csv'
df = load_csv_data(csv_file)
dfg = df[['Category', 'Description', 'Amount']]\
.groupby('Category').sum().sort('Amount', ascending=False)
dfD = df[df.Amount >= 0.]
dfgD = dfD[['Category', 'Description', 'Amount']]\
.groupby('Category').sum().sort('Amount', ascending=False)
locale.setlocale( locale.LC_ALL, '' )
def strfcur(a):
return locale.currency(a, grouping=True)
# brewer qualitative colors
palette = { # brewer qualitative
'seagreen': '#1b9e77', 'pumpkin': '#d95f02',
'blue': '#7570b3', 'pink': '#e7298a',
'green': '#66a61e','yellow': '#e6ab02',
'mustard': '#a6761d' }
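    # df.Date holds numpy datetime64[ns]; convert to python datetimes by going
    # through the integer nanosecond timestamp.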
dates = [ dt.datetime.utcfromtimestamp(d.astype(int)*1e-9)
for d in df.Date.values ]
date_begin = dates[-1].strftime("%b %d")
date_end = dates[0].strftime("%b %d")
plt.figure(figsize=(12,6.5), facecolor='w')
def plot_category_spending():
# Summarize categories whose size <= $100.
cutoff = len(dfg.Amount) if len(dfg.Amount) < 8 \
else np.argmin(dfg.Amount.values >= 100.)
category_names = dfg.index[:cutoff].tolist() + [
'%d Others' % (len(dfg.Amount)-cutoff) ]
amounts = dfg.Amount[:cutoff].tolist() + [dfg.Amount[cutoff:].sum()]
ylabels = [ c for c, a in zip(category_names, amounts) if a > 0. ]
amounts = [ a for a in amounts if a > 0. ]
debits = [ dfgD.Amount[c] if c in dfgD.index else 0. for c in dfg.index[:cutoff] ]
# Setup plot style
plt.yticks(range(len(amounts)), ylabels[::-1])
plt.grid('off')
plt.gca().get_xaxis().set_visible(False)
plt.gca().patch.set_visible(False)
plt.tick_params(axis='y', which='both', left='off', right='off')
style = dict(height=0.6, align='center', edgecolor='0.5')
rects = plt.barh(range(len(amounts)), debits[::-1], color=palette.values()[::-1], alpha=0.4, **style)
rects = plt.barh(range(len(amounts)), amounts[::-1], color=palette.values()[::-1], **style)
# Display amount to the right of each bar
for c, a, r in zip(ylabels, amounts[::-1], rects):
r.set_x(r.get_x()+5) # aestheics
plt.text(r.get_width(), r.get_y()+r.get_height()/2., ' '+strfcur(a),
va='center', size='small', color='0.4')
def plot_timeline():
plt.title('You spent %s between %s and %s' %
(strfcur(df.Amount.sum()), date_begin, date_end))
colors = [ [palette['pumpkin'], palette['seagreen']][v < 0]
for v in df.Amount.values ]
# Setup plot style
plt.gca().xaxis_date()
plt.gca().xaxis.set_major_locator(WeekdayLocator(MONDAY))
plt.gca().xaxis.set_major_formatter(DateFormatter("%d/%b"))
plt.gca().autoscale_view()
plt.gca().xaxis.grid(True, 'major')
plt.gca().grid(True)
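        # Jitter each bar by up to +/-12 hours so same-day transactions
        # don't sit exactly on top of each other.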
staggered = [ date + dt.timedelta(hours=np.random.randint(-12, 12)) for date in dates ]
bars = plt.bar(staggered, df.Amount.values,
color=colors, edgecolor='w', align='center', width=0.5, picker=1)
def on_pick(e):
"""
Pick handler for bar clicks.
Display transaction summary in a separate plot on click
"""
selected_bar = e.artist
ix = bars.index(selected_bar)
plt.subplot2grid((6,2),(2,1),1,1)
_ = df.loc[ix]
formatted = " %s %sed on %s\n %s\n %s\n\
Category: %s\n Account: %s" % (
locale.currency(_['Amount'], grouping=True ),
_['Transaction Type'], _['Date'], _['Description'],
_['Original Description'], _['Category'], _['Account Name'])
plt.text(0, 0.5, formatted, va='center', ha='left',
family='monospace', size='large', color='#220000')
plt.axis('off')
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
plt.gcf().canvas.draw()
plt.gcf().canvas.mpl_connect('pick_event', on_pick)
plt.subplot2grid((6,2),(0,0),4,1)
plot_category_spending()
plt.subplot2grid((6,2),(4,0),2,2)
plot_timeline()
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main() | mit |
Becksteinlab/MDPOW | setup.py | 1 | 2450 | # setuptools installation of POW
# Copyright (c) 2010 Oliver Beckstein <[email protected]>
# Released under the GNU Public License 3 (or higher, your choice)
from setuptools import setup, find_packages
# Dynamically calculate the version based on VERSION.
version = __import__('mdpow.version').get_version()
setup(name="MDPOW",
version=version,
description="A library for computing solvation/water partitioning coefficients using molecular dynamics simulations",
long_description=open("README.rst").read(),
author="Oliver Beckstein",
author_email="[email protected]",
license="GPLv3",
url="https://github.com/Becksteinlab/MDPOW",
keywords="science Gromacs analysis 'molecular dynamics'",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Physics",
],
packages=find_packages(exclude=['examples']),
scripts=['scripts/mdpow-pow',
'scripts/mdpow-pcw',
'scripts/mdpow-ghyd',
'scripts/mdpow-check',
'scripts/mdpow-rebuild-fep',
'scripts/mdpow-rebuild-simulation',
'scripts/mdpow-equilibrium',
'scripts/mdpow-fep',
'scripts/mdpow-cfg2yaml.py',
'scripts/mdpow-solvationenergy',
'scripts/mdpow-get-runinput'
],
package_data={'mdpow': ['top/*.dat', 'top/*.gro', 'top/*.itp',
'top/oplsaa.ff/*',
'top/charmm36-mar2019.ff/*',
'top/amber99sb.ff/*',
'templates/*'], },
install_requires=['numpy>=1.6', 'scipy',
'pyyaml',
'GromacsWrapper>=0.5.1',
'numkit',
'six',
'mdanalysis',
'alchemlyb',
'pandas',
'pymbar',
],
#setup_requires=['pytest-runner',],
tests_require=['pytest', 'pybol', 'py'],
zip_safe=True,
)
| gpl-3.0 |
dmoliveira/networkx | examples/drawing/knuth_miles.py | 50 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance betwen the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make new graph of cites, edge if less then 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
ecervera/mindstorms-nb | nxt/sensors/functions.py | 1 | 4511 | import nxt.bluesock
import nxt.motor
import nxt.sensor
import math
import time
from bluetooth.btcommon import BluetoothError
def connect(n):
global brick
global mB; global mC
global s1; global s2; global s3; global s4
global tempo
global connected_robot
try:
address = {5: '00:16:53:08:D5:59', 12: '00:16:53:1A:C6:BD'}
brick = nxt.bluesock.BlueSock(address[n]).connect()
mB = nxt.motor.Motor(brick, nxt.motor.PORT_B)
mC = nxt.motor.Motor(brick, nxt.motor.PORT_C)
s1 = nxt.sensor.Touch(brick, nxt.sensor.PORT_1)
s2 = nxt.sensor.Sound(brick, nxt.sensor.PORT_2)
s2.set_input_mode(0x08,0x80) # dB adjusted, percentage
s3 = nxt.sensor.Light(brick, nxt.sensor.PORT_3)
s3.set_illuminated(True)
s3.set_input_mode(0x05,0x80) # Light active, percentage
s4 = nxt.sensor.Ultrasonic(brick, nxt.sensor.PORT_4)
tempo = 0.5
connected_robot = n
print("\x1b[32mRobot %d connectat.\x1b[0m" % n)
except BluetoothError as e:
errno, errmsg = eval(e.args[0])
if errno==16:
print("\x1b[31mNo es pot connectar, hi ha un altre programa ocupant la connexió.\x1b[0m")
elif errno==13:
print("\x1b[31mNo es pot connectar, el dispositiu no està emparellat.\x1b[0m")
elif errno == 112:
print("\x1b[31mNo es troba el brick, assegurat que estiga encés.\x1b[0m")
else:
print("Error %d: %s" % (errno, errmsg))
except KeyError:
print("\x1b[31mNúmero de robot incorrecte.\x1b[0m")
def disconnect():
try:
brick.sock.close()
print("\x1b[32mRobot %d desconnectat.\x1b[0m" % connected_robot)
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def stop():
try:
mB.brake()
mC.brake()
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def forward(speed=100,speed_B=100,speed_C=100):
move(speed_B=min(abs(speed),abs(speed_B)),speed_C=min(abs(speed),abs(speed_C)))
def backward(speed=100,speed_B=100,speed_C=100):
move(speed_B=-min(abs(speed),abs(speed_B)),speed_C=-min(abs(speed),abs(speed_C)))
def left(speed=100):
move(speed_B=0,speed_C=abs(speed))
def left_sharp(speed=100):
move(speed_B=-abs(speed),speed_C=abs(speed))
def right(speed=100):
move(speed_B=abs(speed),speed_C=0)
def right_sharp(speed=100):
move(speed_B=abs(speed),speed_C=-abs(speed))
def move(speed_B=0,speed_C=0):
max_speed = 100
speed_B = int(speed_B)
speed_C = int(speed_C)
if speed_B > 100:
speed_B = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_B < -100:
speed_B = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C > 100:
speed_C = 100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
if speed_C < -100:
speed_C = -100
print("\x1b[33mLa velocitat màxima és 100.\x1b[0m")
try:
mB.run(-int(speed_B*max_speed/100))
mC.run(int(speed_C*max_speed/100))
except NameError:
print("\x1b[31mNo hi ha connexió amb el robot.\x1b[0m")
def touch():
return s1.is_pressed()
def sound():
return s2.get_loudness()
def light():
return s3.get_lightness()
from nxt.telegram import InvalidOpcodeError, InvalidReplyError
def ultrasonic():
global s4
try:
return s4.get_distance()
except (InvalidOpcodeError, InvalidReplyError):
disconnect()
print("\x1b[33mError de connexió, reintentant...\x1b[0m")
time.sleep(1)
connect(connected_robot)
return s4.get_distance()
def play_sound(s):
brick.play_sound_file(False, bytes((s+'.rso').encode('ascii')))
def say(s):
play_sound(s)
def play_tone(f,t):
try:
brick.play_tone_and_wait(f, int(t*1000*tempo))
time.sleep(0.01)
except:
pass
from IPython.display import clear_output
def read_and_print(sensor):
try:
while True:
clear_output(wait=True)
print(sensor())
except KeyboardInterrupt:
pass
def test_sensors():
try:
while True:
clear_output(wait=True)
print(" Touch: %d\n Light: %d\n Sound: %d\nUltrasonic: %d" % (touch(),light(),sound(), ultrasonic()))
except KeyboardInterrupt:
pass
import matplotlib.pyplot as plt
def plot(l):
plt.plot(l)
| mit |
lisa-lab/pylearn2 | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 39 | 5044 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
import warnings
try:
import matplotlib.pyplot as plt
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image,
    LocalDot has a different filterbank for each position in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
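    For example (purely illustrative sizes), images shaped
    (color_groups=1, colors_per_group=3, rows=32, cols=32, images=128)
    with a filterbank shaped (9, 9, 3, 5, 5, 1, 16) -- a 9x9 grid of
    positions, 5x5 patches, 16 filters per group -- yield activations
    shaped (1, 16, 9, 9, 128), provided the sizes satisfy the
    module-grid/stride constraints of FilterActs.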
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
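# A minimal pure-numpy sketch of the "unshared convolution" idea described in
# the LocalDot docstring above: every output position has its own filter. The
# toy shapes below are made up for illustration only; this does not use
# FilterActs/ImgActs or the exact 5-/7-tuple layouts documented there.
def _unshared_conv_toy_example():
    rng = np.random.RandomState(0)
    rows = cols = 4            # output positions
    frows = fcols = 3          # filter (patch) size
    img = rng.rand(rows + frows - 1, cols + fcols - 1)   # single-channel toy image
    filters = rng.rand(rows, cols, frows, fcols)         # a different filter per position
    out = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            patch = img[r:r + frows, c:c + fcols]
            out[r, c] = np.sum(patch * filters[r, c])    # position-specific dot product
    return out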
| bsd-3-clause |
Eric89GXL/scikit-learn | benchmarks/bench_plot_neighbors.py | 10 | 6499 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, DD, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure('scikit-learn nearest neighbors benchmark results',
figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
rlong011/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
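# A hedged alternative sketch: the manual parsing above can also be expressed
# with pandas' own CSV reader once the file has been saved. Column names then
# follow the CSV header ('Open', 'High', ...) rather than the lower-case keys
# built above; this is illustrative only and is not called by the script.
def _read_saved_csv_with_pandas(fname=fName):
    import pandas
    return pandas.read_csv(fname, index_col='Date', parse_dates=True)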
| bsd-3-clause |
Divergent914/yakddcup2015 | sample/counting.py | 2 | 3038 | # The script MUST contain a function named azureml_main
# which is the entry point for this module.
#
# The entry point function can contain up to two input arguments:
# Param<dataframe1>: a pandas.DataFrame
# Param<dataframe2>: a pandas.DataFrame
# This module develops the counting features of log events.
# The output is a dataframe with 41 columns:
# Col1: enrollment id
# Cols 2-8: counts of events in Monday to Sunday
# Cols 9-32: counts of events in hour 0-23
# Cols 33-39: counts of event types
# Cols 40-41: counts of source types
from numpy import *
from datetime import datetime
import pandas as pd
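# A minimal sketch of the column layout described in the header comments above.
# The header counts columns from 1; the array indices used below are 0-based,
# so weekday counts sit at indices 1-7 and hourly counts at indices 8-31. The
# timestamp value is an arbitrary example, not taken from the KDD Cup data.
def _feature_column_indices(timestamp=1400000000.0):
    dateobj = datetime.fromtimestamp(timestamp)
    weekday_col = dateobj.weekday() + 1   # Monday=0 -> array index 1
    hour_col = dateobj.hour + 8           # hour 0 -> array index 8
    return weekday_col, hour_col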
def azureml_main(dataframe1 = None, dataframe2 = None):
# Execution logic goes here
unique_enrollment = list(set(dataframe1.iloc[:,0]))
# Define dictionaries to map enrollment_id to row indices
num_unique_enrollment = len(unique_enrollment)
enrollment_dict = dict(zip(unique_enrollment,range(num_unique_enrollment)))
numrows = dataframe1.shape[0]
count_features = zeros((num_unique_enrollment, 41))
# define dictionaries to map source names and event types to indices
source_dict = {'browser':0,'server':1}
event_dict = dict(zip(['access','problem','page_close',\
'nagivate','video','discussion','wiki'],range(7)))
for i in range(numrows):
enrollment_id = dataframe1.iloc[i,0]
row_index = enrollment_dict[enrollment_id]
count_features[row_index,0] = enrollment_id
timestamp_i = float(dataframe1.iloc[i,1])
dateobj = datetime.fromtimestamp(timestamp_i)
weekday = dateobj.weekday()
hour = dateobj.hour
#weekday is between 0 and 6, where Monday is 0, and Sunday is 6
count_features[row_index,weekday+1] += 1
# hour is between 0 and 23
count_features[row_index,hour+8] += 1
event_index = event_dict[dataframe1.iloc[i,3]]
source_index = source_dict[dataframe1.iloc[i,2]]
count_features[row_index,event_index+32] += 1
count_features[row_index,source_index+39] += 1
dataframe1 = pd.DataFrame(count_features)
dataframe1.columns = ['enrollment_id','MonCount',\
'TueCount','WedCount','ThuCount','FriCount',\
'SatCount','SunCount','Hr0Count','Hr1Count','Hr2Count',\
'Hr3Count','Hr4Count','Hr5Count','Hr6Count',\
'Hr7Count','Hr8Count','Hr9Count','Hr10Count',\
'Hr11Count','Hr12Count','Hr13Count','Hr14Count',\
'Hr15Count','Hr16Count','Hr17Count','Hr18Count',\
'Hr19Count','Hr20Count','Hr21Count','Hr22Count',\
'Hr23Count','AccCount','ProCount','PagCount',\
'NagCount','VidCount','DisCount','WikCount',\
'BroCount','SerCount']
# If a zip file is connected to the third input port is connected,
# it is unzipped under ".\Script Bundle". This directory is added
# to sys.path. Therefore, if your zip file contains a Python file
# mymodule.py you can import it using:
# import mymodule
# Return value must be of a sequence of pandas.DataFrame
return dataframe1, | gpl-2.0 |
fzalkow/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
trustedanalytics/spark-tk | python/sparktk/frame/frame.py | 3 | 21286 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.rdd import RDD
from pyspark.sql import DataFrame
from sparktk.frame.pyframe import PythonFrame
from sparktk.frame.schema import schema_to_python, schema_to_scala, schema_is_coercible
from sparktk import dtypes
import logging
logger = logging.getLogger('sparktk')
from sparktk.propobj import PropertiesObject
from sparktk import TkContext
# import constructors for the API's sake (not actually dependencies of the Frame class)
from sparktk.frame.constructors.create import create
from sparktk.frame.constructors.import_csv import import_csv
from sparktk.frame.constructors.import_csv_raw import import_csv_raw
from sparktk.frame.constructors.import_hbase import import_hbase
from sparktk.frame.constructors.import_hive import import_hive
from sparktk.frame.constructors.import_jdbc import import_jdbc
from sparktk.frame.constructors.import_json import import_json
from sparktk.frame.constructors.import_pandas import import_pandas
from sparktk.frame.constructors.import_xml import import_xml
from sparktk.frame.constructors.import_tensorflow import import_tensorflow
__all__ = ["create",
"Frame",
"import_csv",
"import_csv_raw",
"import_hbase",
"import_hive",
"import_jdbc",
"import_json",
"import_pandas",
"import_xml",
"import_tensorflow",
"load"]
class Frame(object):
def __init__(self, tc, source, schema=None, validate_schema=False):
"""(Private constructor -- use tc.frame.create or other methods available from the TkContext)"""
self._tc = tc
if self._is_scala_frame(source):
self._frame = source
elif self._is_scala_rdd(source):
scala_schema = schema_to_scala(tc.sc, schema)
self._frame = self._create_scala_frame(tc.sc, source, scala_schema)
elif self._is_scala_dataframe(source):
self._frame = self._create_scala_frame_from_scala_dataframe(tc.sc, source)
elif isinstance(source, DataFrame):
self._frame = self._create_scala_frame_from_scala_dataframe(tc.sc, source._jdf)
elif isinstance(source, PythonFrame):
self._frame = source
else:
if not isinstance(source, RDD):
if not isinstance(source, list) or (len(source) > 0 and any(not isinstance(row, (list, tuple)) for row in source)):
raise TypeError("Invalid data source. The data parameter must be a 2-dimensional list (list of row data) or an RDD.")
inferred_schema = False
if isinstance(schema, list):
if all(isinstance(item, basestring) for item in schema):
# check if schema is just a list of column names (versus string and data type tuples)
schema = self._infer_schema(source, schema)
inferred_schema = True
elif not all(isinstance(item, tuple) and
len(item) == 2 and
isinstance(item[0], basestring) for item in schema):
raise TypeError("Invalid schema. Expected a list of tuples (str, type) with the column name and data type, but received type %s." % type(schema))
# check for duplicate column names
column_names = [col[0] for col in schema]
duplicate_column_names = set([col for col in column_names if column_names.count(col) > 1])
if len(duplicate_column_names) > 0:
raise ValueError("Invalid schema, column names cannot be duplicated: %s" % ", ".join(duplicate_column_names))
elif schema is None:
schema = self._infer_schema(source)
inferred_schema = True
else:
# Schema is not a list or None
raise TypeError("Invalid schema type: %s. Expected a list of tuples (str, type) with the column name and data type." % type(schema))
for item in schema:
if not self._is_supported_datatype(item[1]):
if inferred_schema:
raise TypeError("The %s data type was found when inferring the schema, and it is not a "
"supported data type. Instead, specify a schema that uses a supported data "
"type, and enable validate_schema so that the data is converted to the proper "
"data type.\n\nInferred schema: %s\n\nSupported data types: %s" %
(str(item[1]), str(schema), dtypes.dtypes))
else:
raise TypeError("Invalid schema. %s is not a supported data type.\n\nSupported data types: %s" %
(str(item[1]), dtypes.dtypes))
source = tc.sc.parallelize(source)
if schema and validate_schema:
# Validate schema by going through the data and checking the data type and attempting to parse it
validate_schema_result = self.validate_pyrdd_schema(source, schema)
source = validate_schema_result.validated_rdd
logger.debug("%s values were unable to be parsed to the schema's data type." % validate_schema_result.bad_value_count)
# If schema contains matrix datatype, then apply type_coercer to convert list[list] to numpy ndarray
map_source = schema_is_coercible(source, list(schema))
self._frame = PythonFrame(map_source, schema)
def _merge_types(self, type_list_a, type_list_b):
"""
Merges two lists of data types
:param type_list_a: First list of data types to merge
:param type_list_b: Second list of data types to merge
:return: List of merged data types
"""
if not isinstance(type_list_a, list) or not isinstance(type_list_b, list):
raise TypeError("Unable to generate schema, because schema is not a list.")
if len(type_list_a) != len(type_list_b):
raise ValueError("Length of each row must be the same (found rows with lengths: %s and %s)." % (len(type_list_a), len(type_list_b)))
return [dtypes._DataTypes.merge_types(type_list_a[i], type_list_b[i]) for i in xrange(0, len(type_list_a))]
def _infer_types_for_row(self, row):
"""
Returns a list of data types for the data in the specified row
:param row: List or Row of data
:return: List of data types
"""
inferred_types = []
for item in row:
if item is None:
inferred_types.append(int)
elif not isinstance(item, list):
inferred_types.append(type(item))
else:
inferred_types.append(dtypes.vector((len(item))))
return inferred_types
def _infer_schema(self, data, column_names=[], sample_size=100):
"""
Infers the schema based on the data in the RDD.
:param sc: Spark Context
:param data: Data used to infer schema
:param column_names: Optional column names to use in the schema. If no column names are provided, columns
are given numbered names. If there are more columns in the RDD than there are in the
column_names list, remaining columns will be numbered.
:param sample_size: Number of rows to check when inferring the schema. Defaults to 100.
:return: Schema
"""
inferred_schema = []
if isinstance(data, list):
if len(data) > 0:
# get the schema for the first row
data_types = self._infer_types_for_row(data[0])
sample_size = min(sample_size, len(data))
for i in xrange (1, sample_size):
data_types = self._merge_types(data_types, self._infer_types_for_row(data[i]))
for i, data_type in enumerate(data_types):
column_name = "C%s" % i
if len(column_names) > i:
column_name = column_names[i]
inferred_schema.append((column_name, data_type))
else:
raise TypeError("Unable to infer schema, because the data provided is not a list.")
return inferred_schema
def _is_supported_datatype(self, data_type):
"""
Returns True if the specified data_type is supported.
"""
supported_primitives = [int, float, long, str, unicode]
if data_type in supported_primitives:
return True
elif data_type is dtypes.datetime:
return True
elif type(data_type) is dtypes.vector:
return True
elif data_type is dtypes.matrix:
return True
else:
return False
def validate_pyrdd_schema(self, pyrdd, schema):
if isinstance(pyrdd, RDD):
schema_length = len(schema)
num_bad_values = self._tc.sc.accumulator(0)
def validate_schema(row, accumulator):
data = []
if len(row) != schema_length:
raise ValueError("Length of the row (%s) does not match the schema length (%s)." % (len(row), len(schema)))
for index, column in enumerate(schema):
data_type = column[1]
try:
if row[index] is not None:
data.append(dtypes.dtypes.cast(row[index], data_type))
except:
data.append(None)
accumulator += 1
return data
validated_rdd = pyrdd.map(lambda row: validate_schema(row, num_bad_values))
# Force rdd to load, so that we can get a bad value count
validated_rdd.count()
return SchemaValidationReturn(validated_rdd, num_bad_values.value)
else:
raise TypeError("Unable to validate schema, because the pyrdd provided is not an RDD.")
@staticmethod
def _create_scala_frame(sc, scala_rdd, scala_schema):
"""call constructor in JVM"""
return sc._jvm.org.trustedanalytics.sparktk.frame.Frame(scala_rdd, scala_schema, False)
@staticmethod
def _create_scala_frame_from_scala_dataframe(sc, scala_dataframe):
"""call constructor in JVM"""
return sc._jvm.org.trustedanalytics.sparktk.frame.Frame(scala_dataframe)
@staticmethod
def _from_scala(tc, scala_frame):
"""creates a python Frame for the given scala Frame"""
return Frame(tc, scala_frame)
def _frame_to_scala(self, python_frame):
"""converts a PythonFrame to a Scala Frame"""
scala_schema = schema_to_scala(self._tc.sc, python_frame.schema)
scala_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.rdd.PythonJavaRdd.pythonToScala(python_frame.rdd._jrdd, scala_schema)
return self._create_scala_frame(self._tc.sc, scala_rdd, scala_schema)
def _is_scala_frame(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.Frame)
def _is_scala_rdd(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.apache.spark.rdd.RDD)
def _is_scala_dataframe(self, item):
return self._tc._jutils.is_jvm_instance_of(item, self._tc.sc._jvm.org.apache.spark.sql.DataFrame)
def _is_python_rdd(self, item):
return isinstance(item, RDD)
@property
def _is_scala(self):
"""answers whether the current frame is backed by a Scala Frame"""
answer = self._is_scala_frame(self._frame)
logger.info("frame._is_scala reference: %s" % answer)
return answer
@property
def _is_python(self):
"""answers whether the current frame is backed by a _PythonFrame"""
answer = not self._is_scala_frame(self._frame)
logger.info("frame._is_python reference: %s" % answer)
return answer
@property
def _scala(self):
"""gets frame backend as Scala Frame, causes conversion if it is current not"""
if self._is_python:
logger.info("frame._scala reference: converting frame backend from Python to Scala")
# If schema contains matrix dataype,
# then apply type_coercer_pymlib to convert ndarray to pymlib DenseMatrix for serialization purpose at java
self._frame.rdd = schema_is_coercible(self._frame.rdd, list(self._frame.schema), True)
# convert PythonFrame to a Scala Frame"""
scala_schema = schema_to_scala(self._tc.sc, self._frame.schema)
scala_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.pythonToScala(self._frame.rdd._jrdd, scala_schema)
self._frame = self._create_scala_frame(self._tc.sc, scala_rdd, scala_schema)
else:
logger.info("frame._scala reference: frame already has a scala backend")
return self._frame
@property
def _python(self):
"""gets frame backend as _PythonFrame, causes conversion if it is current not"""
if self._is_scala:
logger.info("frame._python reference: converting frame backend from Scala to Python")
# convert Scala Frame to a PythonFrame"""
scala_schema = self._frame.schema()
java_rdd = self._tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.scalaToPython(self._frame.rdd())
python_schema = schema_to_python(self._tc.sc, scala_schema)
python_rdd = RDD(java_rdd, self._tc.sc)
# If schema contains matrix datatype, then apply type_coercer to convert list[list] to numpy ndarray
map_python_rdd = schema_is_coercible(python_rdd, list(python_schema))
self._frame = PythonFrame(map_python_rdd, python_schema)
else:
logger.info("frame._python reference: frame already has a python backend")
return self._frame
##########################################################################
# API
##########################################################################
@property
def rdd(self):
"""pyspark RDD (causes conversion if currently backed by a Scala RDD)"""
return self._python.rdd
@property
def dataframe(self):
"""pyspark DataFrame (causes conversion through Scala)"""
return DataFrame(self._scala.dataframe(), self._tc.sql_context)
@property
def schema(self):
if self._is_scala:
return schema_to_python(self._tc.sc, self._frame.schema()) # need ()'s on schema because it's a def in scala
return self._frame.schema
@property
def column_names(self):
"""
Column identifications in the current frame.
:return: list of names of all the frame's columns
Returns the names of the columns of the current frame.
Examples
--------
<skip>
>>> frame.column_names
[u'name', u'age', u'tenure', u'phone']
</skip>
"""
return [name for name, data_type in self.schema]
# Frame Operations
from sparktk.frame.ops.add_columns import add_columns
from sparktk.frame.ops.append import append
from sparktk.frame.ops.assign_sample import assign_sample
from sparktk.frame.ops.bin_column import bin_column
from sparktk.frame.ops.binary_classification_metrics import binary_classification_metrics
from sparktk.frame.ops.box_cox import box_cox
from sparktk.frame.ops.categorical_summary import categorical_summary
from sparktk.frame.ops.collect import collect
from sparktk.frame.ops.column_median import column_median
from sparktk.frame.ops.column_mode import column_mode
from sparktk.frame.ops.column_summary_statistics import column_summary_statistics
from sparktk.frame.ops.copy import copy
from sparktk.frame.ops.correlation import correlation
from sparktk.frame.ops.correlation_matrix import correlation_matrix
from sparktk.frame.ops.count import count
from sparktk.frame.ops.covariance import covariance
from sparktk.frame.ops.covariance_matrix import covariance_matrix
from sparktk.frame.ops.cumulative_percent import cumulative_percent
from sparktk.frame.ops.cumulative_sum import cumulative_sum
from sparktk.frame.ops.dot_product import dot_product
from sparktk.frame.ops.drop_columns import drop_columns
from sparktk.frame.ops.drop_duplicates import drop_duplicates
from sparktk.frame.ops.drop_rows import drop_rows
from sparktk.frame.ops.ecdf import ecdf
from sparktk.frame.ops.entropy import entropy
from sparktk.frame.ops.export_to_csv import export_to_csv
from sparktk.frame.ops.export_to_jdbc import export_to_jdbc
from sparktk.frame.ops.export_to_json import export_to_json
from sparktk.frame.ops.export_to_hbase import export_to_hbase
from sparktk.frame.ops.export_to_hive import export_to_hive
from sparktk.frame.ops.export_to_tensorflow import export_to_tensorflow
from sparktk.frame.ops.filter import filter
from sparktk.frame.ops.flatten_columns import flatten_columns
from sparktk.frame.ops.group_by import group_by
from sparktk.frame.ops.histogram import histogram
from sparktk.frame.ops.inspect import inspect
from sparktk.frame.ops.join_cross import join_cross
from sparktk.frame.ops.join_inner import join_inner
from sparktk.frame.ops.join_left import join_left
from sparktk.frame.ops.join_right import join_right
from sparktk.frame.ops.join_outer import join_outer
from sparktk.frame.ops.map_columns import map_columns
from sparktk.frame.ops.matrix_covariance_matrix import matrix_covariance_matrix
from sparktk.frame.ops.matrix_pca import matrix_pca
from sparktk.frame.ops.matrix_svd import matrix_svd
from sparktk.frame.ops.multiclass_classification_metrics import multiclass_classification_metrics
from sparktk.frame.ops.power_iteration_clustering import power_iteration_clustering
from sparktk.frame.ops.quantile_bin_column import quantile_bin_column
from sparktk.frame.ops.quantiles import quantiles
from sparktk.frame.ops.rename_columns import rename_columns
from sparktk.frame.ops.reverse_box_cox import reverse_box_cox
from sparktk.frame.ops.save import save
from sparktk.frame.ops.sort import sort
from sparktk.frame.ops.sortedk import sorted_k
from sparktk.frame.ops.take import take
from sparktk.frame.ops.tally import tally
from sparktk.frame.ops.tally_percent import tally_percent
from sparktk.frame.ops.timeseries_augmented_dickey_fuller_test import timeseries_augmented_dickey_fuller_test
from sparktk.frame.ops.timeseries_breusch_godfrey_test import timeseries_breusch_godfrey_test
from sparktk.frame.ops.timeseries_breusch_pagan_test import timeseries_breusch_pagan_test
from sparktk.frame.ops.timeseries_durbin_watson_test import timeseries_durbin_watson_test
from sparktk.frame.ops.timeseries_from_observations import timeseries_from_observations
from sparktk.frame.ops.timeseries_slice import timeseries_slice
from sparktk.frame.ops.to_pandas import to_pandas
from sparktk.frame.ops.topk import top_k
from sparktk.frame.ops.unflatten_columns import unflatten_columns
def load(path, tc=TkContext.implicit):
"""load Frame from given path"""
TkContext.validate(tc)
return tc.load(path, Frame)
class SchemaValidationReturn(PropertiesObject):
"""
Return value from schema validation that includes the rdd of validated values and the number of bad values
that were found.
"""
def __init__(self, validated_rdd, bad_value_count):
self._validated_rdd = validated_rdd
self._bad_value_count = bad_value_count
@property
def validated_rdd(self):
"""
RDD of values that have been casted to the data type specified by the frame's schema.
"""
return self._validated_rdd
@property
def bad_value_count(self):
"""
Number of values that were unable to be parsed to the data type specified by the schema.
"""
return self._bad_value_count
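# A small illustrative check of the schema format expected by Frame.__init__
# above: a list of (column_name, type) tuples with unique names. The sample
# schema is invented and simply mirrors the duplicate-column check performed
# in the constructor; it is not used by the module itself.
def _example_schema_is_well_formed(schema=None):
    schema = schema or [("name", str), ("age", int)]
    column_names = [col[0] for col in schema]
    duplicates = set(col for col in column_names if column_names.count(col) > 1)
    return all(isinstance(col, tuple) and len(col) == 2 for col in schema) and not duplicates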
| apache-2.0 |
rvraghav93/scikit-learn | benchmarks/bench_glmnet.py | 111 | 3890 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
mlyundin/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute
force method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
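# A toy illustration of the accuracy metric computed in calc_accuracy above:
# the mean per-query overlap (via np.in1d) between the approximate and exact
# neighbor index sets. The index values below are made up.
def _toy_overlap_accuracy():
    exact = np.array([[0, 1, 2], [3, 4, 5]])
    approx = np.array([[0, 2, 9], [3, 4, 5]])
    per_query = [np.in1d(approx[i], exact[i]).mean() for i in range(len(exact))]
    return np.mean(per_query)   # (2/3 + 1.0) / 2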
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
RayMick/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the `fit` method on the `VotingClassifier` will fit clones
of those original estimators that will be stored in the class attribute
`self.estimators_`.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
Attributes
----------
classes_ : array-like, shape = [n_predictions]
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None):
self.estimators = estimators
self.named_estimators = dict(estimators)
self.voting = voting
self.weights = weights
def fit(self, X, y):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.weights and len(self.weights) != len(self.estimators):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
self.le_ = LabelEncoder()
self.le_.fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
for name, clf in self.estimators:
fitted_clf = clone(clf).fit(X, self.le_.transform(y))
self.estimators_.append(fitted_clf)
return self
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(lambda x:
np.argmax(np.bincount(x,
weights=self.weights)),
axis=1,
arr=predictions)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_classifiers, n_samples]
Class labels predicted by each classifier.
"""
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def get_params(self, deep=True):
"""Return estimator parameter names for GridSearch support"""
if not deep:
return super(VotingClassifier, self).get_params(deep=False)
else:
out = super(VotingClassifier, self).get_params(deep=False)
out.update(self.named_estimators.copy())
for name, step in six.iteritems(self.named_estimators):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
return out
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
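# A standalone toy of the weighted 'hard' voting rule used in
# VotingClassifier.predict above; the prediction matrix and weights are made up.
def _toy_weighted_hard_vote():
    predictions = np.array([[0, 1, 1], [1, 1, 0]])   # rows: samples, cols: classifiers
    weights = [3, 1, 1]
    return np.apply_along_axis(
        lambda x: np.argmax(np.bincount(x, weights=weights)),
        axis=1, arr=predictions)                     # -> array([0, 1])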
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
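# The XXX note above mentions scipy.sparse.rand as a newer way to build a
# random sparse matrix; a minimal sketch with an arbitrary density is shown
# below. It is illustrative only and is not used by the tests.
def _sparse_rand_alternative():
    return sp.rand(shape[0], shape[1], density=0.1, format="csr")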
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert that the first 10 explained variance ratios agree between the 10- and 20-component fits
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
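# Illustrative sketch (added): the identity exercised by the last loop above,
# written out for a single fit -- the reported ratios should match the
# per-component variance of the transformed data over the total variance of
# the densified input.
def _demo_explained_variance_identity(n_components=5):
    tsvd = TruncatedSVD(n_components=n_components, random_state=42)
    Xt = tsvd.fit_transform(X)
    ratios = np.var(Xt, axis=0) / np.var(X.toarray(), axis=0).sum()
    assert_array_almost_equal(tsvd.explained_variance_ratio_, ratios)
    return ratios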
| bsd-3-clause |
rayNymous/nupic | examples/audiostream/audiostream_tp.py | 32 | 9991 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
See README.md for details.
"""
"""
numpy - the language of pyaudio (& everything else)
pyaudio - access to the mic via the soundcard
pyplot - to plot the sound frequencies
bitmaparray - encodes an array of indices into an SDR
TP10X2 - the C++ optimized temporal pooler (TP)
"""
import numpy
import pyaudio
import matplotlib.pyplot as plt
from nupic.encoders.sparse_pass_through_encoder import SparsePassThroughEncoder
from nupic.research.TP10X2 import TP10X2 as TP
class Visualizations:
def calcAnomaly(self, actual, predicted):
"""
Calculates the anomaly of two SDRs
Uses the equation presented on the wiki:
https://github.com/numenta/nupic/wiki/Anomaly-Score-Memo
To put this in terms of the temporal pooler:
A is the actual input array at a given timestep
P is the predicted array that was produced from the previous timestep(s)
[A - (A && P)] / [A]
Rephrasing as questions:
What bits are on in A that are not on in P?
How does that compare to total on bits in A?
Outputs 0 if there's no difference between P and A.
Outputs 1 if P and A are totally distinct.
Not a perfect metric - it doesn't credit proximity
Next step: combine with a metric for a spatial pooler
"""
combined = numpy.logical_and(actual, predicted)
delta = numpy.logical_xor(actual,combined)
delta_score = sum(delta)
actual_score = float(sum(actual))
return delta_score / actual_score
def compareArray(self, actual, predicted):
"""
Produce an array that compares the actual & predicted
'A' - actual
'P' - predicted
'E' - expected (both actual & predicted)
' ' - neither an input nor predicted
"""
compare = []
for i in range(actual.size):
if actual[i] and predicted[i]:
compare.append('E')
elif actual[i]:
compare.append('A')
elif predicted[i]:
compare.append('P')
else:
compare.append(' ')
return compare
def hashtagAnomaly(self, anomaly):
"""
Basic printout method to visualize the anomaly score (scale: 1 - 50 #'s)
"""
hashcount = '#'
for i in range(int(anomaly / 0.02)):
hashcount += '#'
for j in range(int((1 - anomaly) / 0.02)):
hashcount += '.'
return hashcount
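# --- Illustrative check of the anomaly formula above (added sketch) ---
# With actual    A = [1, 1, 0, 1]
# and  predicted P = [1, 0, 0, 1]:
#   A && P         = [1, 0, 0, 1]
#   A xor (A && P) = [0, 1, 0, 0]  -> one active bit in A was not predicted
#   anomaly        = 1 / 3         (A has three active bits)
def _demo_visualizations():
  vis = Visualizations()
  actual = numpy.array([1, 1, 0, 1])
  predicted = numpy.array([1, 0, 0, 1])
  assert abs(vis.calcAnomaly(actual, predicted) - 1.0 / 3.0) < 1e-9
  return vis.compareArray(actual, predicted)  # ['E', 'A', ' ', 'E']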
class AudioStream:
def __init__(self):
"""
Instantiate temporal pooler, encoder, audio sampler, filter, & freq plot
"""
self.vis = Visualizations()
"""
The number of columns in the input and therefore the TP
2**9 = 512
Trial and error pulled that out
numCols should be tested during benchmarking
"""
self.numCols = 2**9
sparsity = 0.10
self.numInput = int(self.numCols * sparsity)
"""
Create a bit map encoder
From the encoder's __init__ method:
1st arg: the total bits in input
2nd arg: the number of bits used to encode each input bit
"""
self.e = SparsePassThroughEncoder(self.numCols, 1)
"""
Sampling details
rate: The sampling rate in Hz of my soundcard
buffersize: The size of the array to which we will save audio segments (2^12 = 4096 is very good)
secToRecord: The length of each sampling
buffersToRecord: how many multiples of buffers are we recording?
"""
rate=44100
secToRecord=.1
self.buffersize=2**12
self.buffersToRecord=int(rate*secToRecord/self.buffersize)
if not self.buffersToRecord:
self.buffersToRecord=1
"""
Filters in Hertz
highHertz: lower limit of the bandpass filter, in Hertz
lowHertz: upper limit of the bandpass filter, in Hertz
max lowHertz = (buffersize / 2 - 1) * rate / buffersize
"""
highHertz = 500
lowHertz = 10000
"""
Convert filters from Hertz to bins
highpass: convert the highHertz into a bin for the FFT
lowpass: convert the lowHertz into a bin for the FFT
NOTES:
highpass is at least the 1st bin since most mics only pick up >=20Hz
lowpass is no higher than buffersize/2 - 1 (highest array index)
passband needs to be wider than size of numInput - not checking for that
"""
self.highpass = max(int(highHertz * self.buffersize / rate),1)
self.lowpass = min(int(lowHertz * self.buffersize / rate), self.buffersize/2 - 1)
"""
The call to create the temporal pooler region
"""
self.tp = TP(numberOfCols=self.numCols, cellsPerColumn=4,
initialPerm=0.5, connectedPerm=0.5,
minThreshold=10, newSynapseCount=10,
permanenceInc=0.1, permanenceDec=0.07,
activationThreshold=8,
globalDecay=0.02, burnIn=2,
checkSynapseConsistency=False,
pamLength=100)
"""
Creating the audio stream from our mic
"""
p = pyaudio.PyAudio()
self.inStream = p.open(format=pyaudio.paInt32,channels=1,rate=rate,input=True,frames_per_buffer=self.buffersize)
"""
Setting up the array that will handle the timeseries of audio data from our input
"""
self.audio = numpy.empty((self.buffersToRecord*self.buffersize),dtype="uint32")
"""
Print out the inputs
"""
print "Number of columns:\t" + str(self.numCols)
print "Max size of input:\t" + str(self.numInput)
print "Sampling rate (Hz):\t" + str(rate)
print "Passband filter (Hz):\t" + str(highHertz) + " - " + str(lowHertz)
print "Passband filter (bin):\t" + str(self.highpass) + " - " + str(self.lowpass)
print "Bin difference:\t\t" + str(self.lowpass - self.highpass)
print "Buffersize:\t\t" + str(self.buffersize)
"""
Setup the plot
Use the bandpass filter frequency range as the x-axis
Rescale the y-axis
"""
plt.ion()
bin = range(self.highpass,self.lowpass)
xs = numpy.arange(len(bin))*rate/self.buffersize + highHertz
self.freqPlot = plt.plot(xs,xs)[0]
plt.ylim(0, 10**12)
while True:
self.processAudio()
def processAudio (self):
"""
Sample audio, encode, send it to the TP
Pulls the audio from the mic
Conditions that audio as an SDR
Computes a prediction via the TP
Update the visualizations
"""
"""
Cycle through the multiples of the buffers we're sampling
Sample audio to store for each frame in buffersize
Mic voltage-level timeseries is saved as 32-bit binary
Convert that 32-bit binary into integers, and save to array for the FFT
"""
for i in range(self.buffersToRecord):
try:
audioString = self.inStream.read(self.buffersize)
except IOError:
print "Overflow error from 'audiostring = inStream.read(buffersize)'. Try decreasing buffersize."
quit()
self.audio[i*self.buffersize:(i + 1)*self.buffersize] = numpy.fromstring(audioString,dtype = "uint32")
"""
Get int array of strength for each bin of frequencies via fast fourier transform
Get the indices of the strongest frequencies (the top 'numInput')
Scale the indices so that the frequencies fit to within numCols
Pick out the unique indices (we've reduced the mapping, so we likely have multiples)
Encode those indices into an SDR via the SparsePassThroughEncoder
Cast the SDR as a float for the TP
"""
ys = self.fft(self.audio, self.highpass, self.lowpass)
fs = numpy.sort(ys.argsort()[-self.numInput:])
rfs = fs.astype(numpy.float32) / (self.lowpass - self.highpass) * self.numCols
ufs = numpy.unique(rfs)
actualInt = self.e.encode(ufs)
actual = actualInt.astype(numpy.float32)
"""
Pass the SDR to the TP
Collect the prediction SDR from the TP
Pass the prediction & actual SDRS to the anomaly calculator & array comparer
Update the frequency plot
"""
self.tp.compute(actual, enableLearn = True, computeInfOutput = True)
predictedInt = self.tp.getPredictedState().max(axis=1)
compare = self.vis.compareArray(actualInt, predictedInt)
anomaly = self.vis.calcAnomaly(actualInt, predictedInt)
print "." . join(compare)
print self.vis.hashtagAnomaly(anomaly)
self.freqPlot.set_ydata(ys)
plt.show(block = False)
plt.draw()
def fft(self, audio, highpass, lowpass):
"""
Fast fourier transform conditioning
Output:
'output' contains the strength of each frequency in the audio signal
frequencies are marked by its position in 'output':
frequency = index * rate / buffersize
output.size = buffersize/2
Method:
Use numpy's FFT (numpy.fft.fft)
Find the magnitude of the complex numbers returned (abs value)
Split the FFT array in half, because we have mirror frequencies
(they're the complex conjugates)
Use just the first half to apply the bandpass filter
Great info here: http://stackoverflow.com/questions/4364823/how-to-get-frequency-from-fft-result
"""
left,right = numpy.split(numpy.abs(numpy.fft.fft(audio)),2)
output = left[highpass:lowpass]
return output
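# --- Illustrative check of the Hz -> FFT-bin conversion above (added) ---
# With the defaults used in __init__ (rate = 44100 Hz, buffersize = 2**12):
#   bin width    = rate / buffersize          ~= 10.77 Hz
#   highpass bin = int(500 * 4096 / 44100)    =  46
#   lowpass bin  = int(10000 * 4096 / 44100)  =  928
# so the passband spans 928 - 46 = 882 bins, comfortably wider than the
# numInput = int(512 * 0.10) = 51 active inputs it must hold.
def _demo_bin_conversion(rate=44100, buffersize=2 ** 12,
                         highHertz=500, lowHertz=10000):
  highpass = max(int(highHertz * buffersize / rate), 1)
  lowpass = min(int(lowHertz * buffersize / rate), buffersize // 2 - 1)
  return highpass, lowpass  # (46, 928)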
audiostream = AudioStream()
| agpl-3.0 |
terkkila/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
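# Illustrative sketch (added; not part of the public API): safe_indexing
# accepts plain Python lists as well as NumPy arrays.
def _demo_safe_indexing():
    rows = safe_indexing(np.array([10, 20, 30, 40]), np.array([0, 2]))
    items = safe_indexing(['a', 'b', 'c'], [2, 0])
    return rows, items  # (array([10, 30]), ['c', 'a'])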
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
russel1237/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
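# Illustrative sketch (added): a small contingency matrix. One sample of true
# class 0 lands in cluster 0, one in cluster 1, and both samples of class 1
# land in cluster 1, giving [[1, 1], [0, 2]].
def _demo_contingency_matrix():
    return contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])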
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1, even with permuted labels
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
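# Illustrative sketch (added): tracing the computation above for the docstring
# example adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]).
#   contingency = [[2, 0], [0, 1], [0, 1]]
#   sum_comb_c  = comb2(2) = 1              (pairs within true classes)
#   sum_comb_k  = comb2(2) + comb2(2) = 2   (pairs within clusters)
#   sum_comb    = comb2(2) = 1              (pairs that agree in both)
#   prod_comb   = 1 * 2 / comb(4, 2) = 1 / 3
#   mean_comb   = (1 + 2) / 2 = 1.5
#   ARI         = (1 - 1/3) / (1.5 - 1/3) = 4 / 7 ~= 0.57
def _demo_adjusted_rand_score():
    return adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1])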
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
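# Illustrative sketch (added): for labels_true=[0, 0, 1, 1] and
# labels_pred=[0, 0, 1, 2] every cluster is pure but class 1 is split, so
#   homogeneity  = MI / H(C) = log(2) / log(2)       = 1.0
#   completeness = MI / H(K) = log(2) / (1.5 log(2)) = 2/3
#   v_measure    = 2 * 1.0 * (2/3) / (1.0 + 2/3)     = 0.8
def _demo_homogeneity_completeness_v_measure():
    return homogeneity_completeness_v_measure([0, 0, 1, 1], [0, 0, 1, 2])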
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize the V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
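# Illustrative sketch (added): for labels_true=[0, 0, 1, 1] and
# labels_pred=[0, 0, 1, 2] the contingency matrix is [[2, 0, 0], [0, 1, 1]],
# the non-zero joint probabilities are P(0,0)=0.5 and P(1,1)=P(1,2)=0.25, and
#   MI = 0.5*log(2) + 0.25*log(2) + 0.25*log(2) = log(2) ~= 0.693 nats.
def _demo_mutual_info_score():
    return mutual_info_score([0, 0, 1, 1], [0, 0, 1, 2])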
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
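# Illustrative sketch (added): unlike the raw MI, the adjusted score stays
# close to zero for two independent random labelings.
def _demo_ami_chance_level(n_samples=200, n_clusters=10, seed=0):
    rng = np.random.RandomState(seed)
    a = rng.randint(0, n_clusters, n_samples)
    b = rng.randint(0, n_clusters, n_samples)
    # mutual_info_score(a, b) is clearly positive here, while the adjusted
    # value fluctuates around 0 and can even be slightly negative.
    return mutual_info_score(a, b), adjusted_mutual_info_score(a, b)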
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
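# Illustrative sketch (added): entropy([0, 0, 1, 1]) = log(2) ~= 0.693 and
# entropy([0, 0, 1, 2]) = 1.5 * log(2) ~= 1.040, the two values used in the
# homogeneity/completeness sketch above.
def _demo_entropy():
    return entropy([0, 0, 1, 1]), entropy([0, 0, 1, 2])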
| bsd-3-clause |
mwv/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# compute univariate feature scores; the tests below check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
rishikksh20/data | us-weather-history/visualize_weather.py | 36 | 4799 | import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime
'''
This is an example to generate the Philadelphia, PA weather chart.
If you want to make the chart for another city, you will have to modify
this code slightly to read that city's data in, change the title, and
likely change the y-axis of the chart to fit your city's temperature range.
I also use a custom matplotlib style as the basis for these charts, which you
can find here: https://gist.githubusercontent.com/rhiever/d0a7332fe0beebfdc3d5/raw/223d70799b48131d5ce2723cd5784f39d7a3a653/tableau10.mplstyle
'''
weather_data = pd.read_csv('KPHL.csv', parse_dates=['date'])
print(weather_data.describe())
# Generate a bunch of histograms of the data to make sure that all of the data
# is in an expected range.
with plt.style.context('https://gist.githubusercontent.com/rhiever/d0a7332fe0beebfdc3d5/raw/223d70799b48131d5ce2723cd5784f39d7a3a653/tableau10.mplstyle'):
for column in weather_data.columns:
if column in ['date']:
continue
plt.figure()
plt.hist(weather_data[column].values)
plt.title(column)
plt.savefig('{}.png'.format(column))
# Make sure we're only plotting temperatures for July 2014 - June 2015
weather_data_subset = weather_data[weather_data['date'] >= datetime(year=2014, month=7, day=1)]
weather_data_subset = weather_data_subset[weather_data_subset['date'] < datetime(year=2015, month=7, day=1)].copy()
weather_data_subset['day_order'] = range(len(weather_data_subset))
day_order = weather_data_subset['day_order']
record_max_temps = weather_data_subset['record_max_temp'].values
record_min_temps = weather_data_subset['record_min_temp'].values
average_max_temps = weather_data_subset['average_max_temp'].values
average_min_temps = weather_data_subset['average_min_temp'].values
actual_max_temps = weather_data_subset['actual_max_temp'].values
actual_min_temps = weather_data_subset['actual_min_temp'].values
fig, ax1 = plt.subplots(figsize=(15, 7))
# Create the bars showing all-time record highs and lows
plt.bar(day_order, record_max_temps - record_min_temps, bottom=record_min_temps,
edgecolor='none', color='#C3BBA4', width=1)
# Create the bars showing average highs and lows
plt.bar(day_order, average_max_temps - average_min_temps, bottom=average_min_temps,
edgecolor='none', color='#9A9180', width=1)
# Create the bars showing this year's highs and lows
plt.bar(day_order, actual_max_temps - actual_min_temps, bottom=actual_min_temps,
edgecolor='black', linewidth=0.5, color='#5A3B49', width=1)
new_max_records = weather_data_subset[weather_data_subset.record_max_temp <= weather_data_subset.actual_max_temp]
new_min_records = weather_data_subset[weather_data_subset.record_min_temp >= weather_data_subset.actual_min_temp]
# Create the dots marking record highs and lows for the year
plt.scatter(new_max_records['day_order'].values + 0.5,
new_max_records['actual_max_temp'].values + 1.25,
s=15, zorder=10, color='#d62728', alpha=0.75, linewidth=0)
plt.scatter(new_min_records['day_order'].values + 0.5,
new_min_records['actual_min_temp'].values - 1.25,
s=15, zorder=10, color='#1f77b4', alpha=0.75, linewidth=0)
plt.ylim(-15, 111)
plt.xlim(-5, 370)
plt.yticks(range(-10, 111, 10), [r'{}$^\circ$'.format(x)
for x in range(-10, 111, 10)], fontsize=10)
plt.ylabel(r'Temperature ($^\circ$F)', fontsize=12)
month_beginning_df = weather_data_subset[weather_data_subset['date'].apply(lambda x: True if x.day == 1 else False)]
month_beginning_indeces = list(month_beginning_df['day_order'].values)
month_beginning_names = list(month_beginning_df['date'].apply(lambda x: x.strftime("%B")).values)
month_beginning_names[0] += '\n\'14'
month_beginning_names[6] += '\n\'15'
# Add the last month label manually
month_beginning_indeces += [weather_data_subset['day_order'].values[-1]]
month_beginning_names += ['July']
plt.xticks(month_beginning_indeces,
month_beginning_names,
fontsize=10)
ax2 = ax1.twiny()
plt.xticks(month_beginning_indeces,
month_beginning_names,
fontsize=10)
plt.xlim(-5, 370)
plt.grid(False)
ax3 = ax1.twinx()
plt.yticks(range(-10, 111, 10), [r'{}$^\circ$'.format(x)
for x in range(-10, 111, 10)], fontsize=10)
plt.ylim(-15, 111)
plt.grid(False)
plt.title('Philadelphia, PA\'s weather, July 2014 - June 2015\n\n', fontsize=20)
plt.savefig('philadelphia-weather-july14-june15.png')
| mit |
kevin-intel/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 3 | 22725 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# Modified by: Pete Green <[email protected]>
# License: BSD 3 clause
import sys
import re
import numpy as np
import warnings
from scipy.optimize import approx_fprime
import pytest
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct, ExpSineSquared
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import (
assert_array_less,
assert_almost_equal,
assert_array_almost_equal,
assert_allclose
)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
non_fixed_kernels = [kernel for kernel in kernels
if kernel != fixed_kernel]
@pytest.mark.parametrize('kernel', kernels)
def test_gpr_interpolation(kernel):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test the interpolating property for different kernels.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
def test_gpr_interpolation_structured():
# Test the interpolating property for different kernels.
kernel = MiniSeqKernel(baseline_similarity_bounds='fixed')
X = ['A', 'B', 'C']
y = np.array([1, 2, 3])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(kernel(X, eval_gradient=True)[1].ravel(),
(1 - np.eye(len(X))).ravel())
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_lml_improving(kernel):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test that hyperparameter-tuning improves log-marginal likelihood.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
gpr.log_marginal_likelihood(kernel.theta))
@pytest.mark.parametrize('kernel', kernels)
def test_lml_precomputed(kernel):
# Test that lml of optimized kernel is stored correctly.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) ==
gpr.log_marginal_likelihood())
@pytest.mark.parametrize('kernel', kernels)
def test_lml_without_cloning_kernel(kernel):
    # Test that clone_kernel=False has the side effect of modifying kernel_.theta.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
input_theta = np.ones(gpr.kernel_.theta.shape, dtype=np.float64)
gpr.log_marginal_likelihood(input_theta, clone_kernel=False)
assert_almost_equal(gpr.kernel_.theta, input_theta, 7)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
# Test that we are in local maximum after hyperparameter-optimization.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1]))
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    # Test that hyperparameter-optimization remains in bounds.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
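    # replace infinite upper bounds by the largest representable value so the
    # elementwise comparisons below remain well defined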
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
@pytest.mark.parametrize('kernel', kernels)
def test_lml_gradient(kernel):
# Compare analytic and numeric gradient of log marginal likelihood.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
@pytest.mark.parametrize('kernel', kernels)
def test_prior(kernel):
# Test that GP prior has mean 0 and identical variances.
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
@pytest.mark.parametrize('kernel', kernels)
def test_sample_statistics(kernel):
# Test that statistics of samples drawn from GP are correct.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
# Test that kernel parameters are unmodified when optimizer is None.
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert np.exp(gpr.kernel_.theta) == 1.0
@pytest.mark.parametrize('kernel', kernels)
@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
def test_predict_cov_vs_std(kernel, target):
if sys.maxsize <= 2 ** 32 and sys.version_info[:2] == (3, 6):
pytest.xfail("This test may fail on 32bit Py3.6")
# Test that predicted std.-dev. is consistent with cov's diagonal.
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
# Test that GPR can identify meaningful anisotropic length-scales.
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert (np.exp(gpr.kernel_.theta[1]) >
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert lml > last_lml - np.finfo(np.float32).eps
last_lml = lml
@pytest.mark.parametrize('kernel', kernels)
def test_y_normalization(kernel):
"""
Test normalization of the target values in GP
Fitting non-normalizing GP on normalized y and fitting normalizing GP
on unnormalized y should yield identical results. Note that, here,
'normalized y' refers to y that has been made zero mean and unit
variance.
"""
y_mean = np.mean(y)
y_std = np.std(y)
y_norm = (y - y_mean) / y_std
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_pred * y_std + y_mean
y_pred_std = y_pred_std * y_std
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
y_cov = y_cov * y_std**2
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_large_variance_y():
"""
    Here we test that, when normalize_y=True, our GP can produce a
sensible fit to training data whose variance is significantly
larger than unity. This test was made in response to issue #15612.
GP predictions are verified against predictions that were made
using GPy which, here, is treated as the 'gold standard'. Note that we
only investigate the RBF kernel here, as that is what was used in the
GPy implementation.
The following code can be used to recreate the GPy data:
--------------------------------------------------------------------------
import GPy
kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
gpy.optimize()
y_pred_gpy, y_var_gpy = gpy.predict(X2)
y_pred_std_gpy = np.sqrt(y_var_gpy)
--------------------------------------------------------------------------
"""
# Here we utilise a larger variance version of the training data
y_large = 10 * y
# Standard GP with normalize_y=True
RBF_params = {'length_scale': 1.0}
kernel = RBF(**RBF_params)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y_large)
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
# 'Gold standard' mean predictions from GPy
y_pred_gpy = np.array([15.16918303,
-27.98707845,
-39.31636019,
14.52605515,
69.18503589])
# 'Gold standard' std predictions from GPy
y_pred_std_gpy = np.array([7.78860962,
3.83179178,
0.63149951,
0.52745188,
0.86170042])
    # Based on numerical experiments, it's reasonable to expect our
    # GP's mean predictions to be within 7% of those made by GPy.
assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
    # Based on numerical experiments, it's reasonable to expect our
    # GP's std predictions to be within 15% of those made by GPy.
assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
def test_y_multioutput():
# Test that GPR can deal with multi-dimensional target values
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
@pytest.mark.parametrize('kernel', non_fixed_kernels)
def test_custom_optimizer(kernel):
# Test that GPR can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert (gpr.log_marginal_likelihood(gpr.kernel_.theta) >
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_gpr_correct_error_message():
X = np.arange(12).reshape(6, -1)
y = np.ones(6)
kernel = DotProduct()
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
message = (
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually increasing "
"the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% kernel
)
with pytest.raises(np.linalg.LinAlgError, match=re.escape(message)):
gpr.fit(X, y)
@pytest.mark.parametrize('kernel', kernels)
def test_duplicate_input(kernel):
# Test GPR can handle two different output-values for the same input.
gpr_equal_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
def test_no_fit_default_predict():
    # Test that GPR predictions without fit do not break by default.
default_kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
gpr1 = GaussianProcessRegressor()
_, y_std1 = gpr1.predict(X, return_std=True)
_, y_cov1 = gpr1.predict(X, return_cov=True)
gpr2 = GaussianProcessRegressor(kernel=default_kernel)
_, y_std2 = gpr2.predict(X, return_std=True)
_, y_cov2 = gpr2.predict(X, return_cov=True)
assert_array_almost_equal(y_std1, y_std2)
assert_array_almost_equal(y_cov1, y_cov2)
def test_warning_bounds():
kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
gpr = GaussianProcessRegressor(kernel=kernel)
warning_message = (
"The optimal value found for dimension 0 of parameter "
"length_scale is close to the specified upper bound "
"0.001. Increasing the bound and calling fit again may "
"find a better value."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
gpr.fit(X, y)
kernel_sum = (WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) +
RBF(length_scale_bounds=[1e3, 1e5]))
gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpr_sum.fit(X, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k1__noise_level is close to the "
"specified upper bound 0.001. "
"Increasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"k2__length_scale is close to the "
"specified lower bound 1000.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
X_tile = np.tile(X, 2)
kernel_dims = RBF(length_scale=[1., 2.],
length_scale_bounds=[1e1, 1e2])
gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
with pytest.warns(None) as record:
with warnings.catch_warnings():
# scipy 1.3.0 uses tostring which is deprecated in numpy
warnings.filterwarnings("ignore", "tostring", DeprecationWarning)
gpr_dims.fit(X_tile, y)
assert len(record) == 2
assert record[0].message.args[0] == ("The optimal value found for "
"dimension 0 of parameter "
"length_scale is close to the "
"specified lower bound 10.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
assert record[1].message.args[0] == ("The optimal value found for "
"dimension 1 of parameter "
"length_scale is close to the "
"specified lower bound 10.0. "
"Decreasing the bound and calling "
"fit again may find a better value.")
def test_bound_check_fixed_hyperparameter():
# Regression test for issue #17943
# Check that having a hyperparameter with fixed bounds doesn't cause an
# error
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
kernel = k1 + k2
GaussianProcessRegressor(kernel=kernel).fit(X, y)
# FIXME: we should test for multitargets as well. However, GPR is broken:
# see: https://github.com/scikit-learn/scikit-learn/pull/19706
@pytest.mark.parametrize('kernel', kernels)
def test_constant_target(kernel):
"""Check that the std. dev. is affected to 1 when normalizing a constant
feature.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18318
NaN where affected to the target when scaling due to null std. dev. with
constant target.
"""
y_constant = np.ones(X.shape[0], dtype=np.float64)
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y_constant)
assert gpr._y_train_std == pytest.approx(1.0)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_allclose(y_pred, y_constant)
# set atol because we compare to zero
assert_allclose(np.diag(y_cov), 0., atol=1e-9)
def test_gpr_consistency_std_cov_non_invertible_kernel():
"""Check the consistency between the returned std. dev. and the covariance.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19936
    Inconsistencies were observed when the kernel cannot be inverted (or is
    not numerically stable).
"""
kernel = (C(8.98576054e+05, (1e-12, 1e12)) *
RBF([5.91326520e+02, 1.32584051e+03], (1e-12, 1e12)) +
WhiteKernel(noise_level=1e-5))
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
X_train = np.array([[0., 0.], [1.54919334, -0.77459667], [-1.54919334, 0.],
[0., -1.54919334], [0.77459667, 0.77459667],
[-0.77459667, 1.54919334]])
y_train = np.array([[-2.14882017e-10], [-4.66975823e+00], [4.01823986e+00],
[-1.30303674e+00], [-1.35760156e+00],
[3.31215668e+00]])
gpr.fit(X_train, y_train)
X_test = np.array([[-1.93649167, -1.93649167], [1.93649167, -1.93649167],
[-1.93649167, 1.93649167], [1.93649167, 1.93649167]])
pred1, std = gpr.predict(X_test, return_std=True)
pred2, cov = gpr.predict(X_test, return_cov=True)
assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
| bsd-3-clause |
hbldh/sudokuextract | run_efd.py | 1 | 2066 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`run`
==================
.. module:: run
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <[email protected]>
Created on 2016-01-14, 09:10
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from sudokuextract.extract import extract_sudoku
from sudokuextract.ml import fit
from sudokuextract.utils import download_image, load_image
from sudokuextract import data
#images, labels, X, y = data.create_mnist_dataset()
#data.save_training_data(X, y, data_source='mnist')
#images, labels, X, y = data.create_data_set_from_images('~/Documents/SudokuExtract_Train', force=True)
#data.save_training_data(X, y, data_source='se')
#data.fetch_all_xanadoku_images('~/Documents/SudokuExtract_Test', 'bjornbar')
#image_url = "https://static-secure.guim.co.uk/sys-images/Guardian/Pix/pictures/2013/2/27/1361977880123/Sudoku2437easy.jpg"
the_image = download_image("https://res.cloudinary.com/hzlcxa6rf/image/upload/56d9d0df9f94ac0009519152")
#the_image = load_image('~/Documents/SudokuExtract_Test/56d9aad9ae834500099af4da.jpg')
#the_image = load_image('~/Documents/SudokuExtract_Test/56e006a63e921a0009d40071.jpg')
#the_image = load_image('~/Documents/SudokuExtract_Test/56e0053e3e921a0009d40070.jpg')
#the_image = load_image('~/Documents/SudokuExtract_Test/56e0a6237303ae0009e9a994.jpg')
#the_image = load_image('~/Documents/SudokuExtract_Train/i1.png')
#the_image = load_image('tests/img18.jpg')
#the_image = the_image.rotate(-90)
preds, sudoku, subimage = extract_sudoku(the_image, force=True)
import matplotlib.pyplot as plt
ax = plt.subplot2grid((9, 9+9), (0, 0), colspan=9, rowspan=9)
ax.imshow(subimage, plt.cm.gray)
ax.axis('off')
for k in range(len(sudoku)):
for kk in range(len(sudoku[k])):
ax = plt.subplot2grid((9, 9 + 9), (k, 9+kk))
ax.imshow(sudoku[k][kk], plt.cm.gray)
ax.set_title(str(preds[k, kk]))
ax.axis('off')
plt.show()
| mit |
belltailjp/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of scikit-learn to process a very large
dataset in chunks. The way we proceed is that we load one image at a time
and randomly extract 50 patches from it. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
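            # centre and whiten the accumulated batch of patches before the
            # online k-means update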
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
ianatpn/nupictest | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_agg.py | 69 | 11729 | """
An agg http://antigrain.com/ backend
Features that are implemented
* capstyles and join styles
* dashes
* linewidth
* lines, rectangles, ellipses
* clipping to a rectangle
* output to RGBA and PNG
* alpha blending
* DPI scaling properly - everything scales properly (dashes, linewidths, etc)
* draw polygon
* freetype2 w/ ft2font
TODO:
* allow save to file handle
* integrate screen dpi w/ ppi and text
"""
from __future__ import division
import numpy as npy
from matplotlib import verbose, rcParams
from matplotlib.backend_bases import RendererBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, maxdict
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont
from matplotlib.ft2font import FT2Font, LOAD_FORCE_AUTOHINT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from _backend_agg import RendererAgg as _RendererAgg
from matplotlib import _png
backend_version = 'v2.2'
class RendererAgg(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles
"""
debug=1
texd = maxdict(50) # a cache of tex image rasters
_fontd = maxdict(50)
def __init__(self, width, height, dpi):
if __debug__: verbose.report('RendererAgg.__init__', 'debug-annoying')
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
if __debug__: verbose.report('RendererAgg.__init__ width=%s, height=%s'%(width, height), 'debug-annoying')
self._renderer = _RendererAgg(int(width), int(height), dpi, debug=False)
if __debug__: verbose.report('RendererAgg.__init__ _RendererAgg done',
'debug-annoying')
#self.draw_path = self._renderer.draw_path # see below
self.draw_markers = self._renderer.draw_markers
self.draw_path_collection = self._renderer.draw_path_collection
self.draw_quad_mesh = self._renderer.draw_quad_mesh
self.draw_image = self._renderer.draw_image
self.copy_from_bbox = self._renderer.copy_from_bbox
self.restore_region = self._renderer.restore_region
self.tostring_rgba_minimized = self._renderer.tostring_rgba_minimized
self.mathtext_parser = MathTextParser('Agg')
self.bbox = Bbox.from_bounds(0, 0, self.width, self.height)
if __debug__: verbose.report('RendererAgg.__init__ done',
'debug-annoying')
def draw_path(self, gc, path, transform, rgbFace=None):
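        # Agg struggles with very long unfilled paths, so when a path exceeds
        # rcParams['agg.path.chunksize'] vertices it is drawn in chunks; each
        # chunk starts with a MOVETO at the end of the previous one so the
        # stroke appears continuous.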
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if nmax > 100 and npts > nmax and path.should_simplify and rgbFace is None:
nch = npy.ceil(npts/float(nmax))
chsize = int(npy.ceil(npts/nch))
i0 = npy.arange(0, npts, chsize)
i1 = npy.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace)
def draw_mathtext(self, gc, x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if __debug__: verbose.report('RendererAgg.draw_mathtext',
'debug-annoying')
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
x = int(x) + ox
y = int(y) - oy
self._renderer.draw_text_image(font_image, x, y + 1, angle, gc)
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
Render the text
"""
if __debug__: verbose.report('RendererAgg.draw_text', 'debug-annoying')
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
font = self._get_agg_font(prop)
if font is None: return None
if len(s) == 1 and ord(s) > 127:
font.load_char(ord(s), flags=LOAD_FORCE_AUTOHINT)
else:
# We pass '0' for angle here, since it will be rotated (in raster
# space) in the following call to draw_text_image).
font.set_text(s, 0, flags=LOAD_FORCE_AUTOHINT)
font.draw_glyphs_to_bitmap()
#print x, y, int(x), int(y)
self._renderer.draw_text_image(font.get_image(), int(x), int(y) + 1, angle, gc)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop
        # passing rgb is a little hack to make caching in the
# texmanager more efficient. It is not meant to be used
# outside the backend
"""
if ismath=='TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: descent of TeX text (I am imitating backend_ps here -JKS)
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
font = self._get_agg_font(prop)
font.set_text(s, 0.0, flags=LOAD_FORCE_AUTOHINT) # the width and height of unrotated string
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def draw_tex(self, gc, x, y, s, prop, angle):
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
        im = self.texd.get(key)
        if im is None:
            Z = texmanager.get_grey(s, size, self.dpi)
            im = npy.array(Z * 255.0, npy.uint8)
            self.texd[key] = im
        self._renderer.draw_text_image(im, x, y, angle, gc)
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def _get_agg_font(self, prop):
"""
        Get the font for text instance t, caching for efficiency
"""
if __debug__: verbose.report('RendererAgg._get_agg_font',
'debug-annoying')
key = hash(prop)
font = self._fontd.get(key)
if font is None:
fname = findfont(prop)
font = self._fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self._fontd[fname] = font
self._fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def points_to_pixels(self, points):
"""
        convert point measures to pixels using dpi and the pixels per
inch of the display
"""
if __debug__: verbose.report('RendererAgg.points_to_pixels',
'debug-annoying')
return points*self.dpi/72.0
def tostring_rgb(self):
if __debug__: verbose.report('RendererAgg.tostring_rgb',
'debug-annoying')
return self._renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('RendererAgg.tostring_argb',
'debug-annoying')
return self._renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('RendererAgg.buffer_rgba',
'debug-annoying')
return self._renderer.buffer_rgba(x,y)
def clear(self):
self._renderer.clear()
def option_image_nocomposite(self):
# It is generally faster to composite each image directly to
# the Figure, and there's no file size benefit to compositing
# with the Agg backend
return True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if __debug__: verbose.report('backend_agg.new_figure_manager',
'debug-annoying')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasAgg(thisFig)
manager = FigureManagerBase(canvas, num)
return manager
class FigureCanvasAgg(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def copy_from_bbox(self, bbox):
renderer = self.get_renderer()
return renderer.copy_from_bbox(bbox)
def restore_region(self, region):
renderer = self.get_renderer()
return renderer.restore_region(region)
def draw(self):
"""
Draw the figure using the renderer
"""
if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying')
self.renderer = self.get_renderer()
self.figure.draw(self.renderer)
def get_renderer(self):
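        # reuse the cached renderer unless the figure size or dpi has
        # changed since it was created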
l, b, w, h = self.figure.bbox.bounds
key = w, h, self.figure.dpi
try: self._lastKey, self.renderer
except AttributeError: need_new_renderer = True
else: need_new_renderer = (self._lastKey != key)
if need_new_renderer:
self.renderer = RendererAgg(w, h, self.figure.dpi)
self._lastKey = key
return self.renderer
def tostring_rgb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_rgb',
'debug-annoying')
return self.renderer.tostring_rgb()
def tostring_argb(self):
if __debug__: verbose.report('FigureCanvasAgg.tostring_argb',
'debug-annoying')
return self.renderer.tostring_argb()
def buffer_rgba(self,x,y):
if __debug__: verbose.report('FigureCanvasAgg.buffer_rgba',
'debug-annoying')
return self.renderer.buffer_rgba(x,y)
def get_default_filetype(self):
return 'png'
def print_raw(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
renderer._renderer.write_rgba(filename_or_obj)
renderer.dpi = original_dpi
print_rgba = print_raw
def print_png(self, filename_or_obj, *args, **kwargs):
FigureCanvasAgg.draw(self)
renderer = self.get_renderer()
original_dpi = renderer.dpi
renderer.dpi = self.figure.dpi
if is_string_like(filename_or_obj):
filename_or_obj = file(filename_or_obj, 'wb')
_png.write_png(renderer._renderer.buffer_rgba(0, 0),
renderer.width, renderer.height,
filename_or_obj, self.figure.dpi)
renderer.dpi = original_dpi
| gpl-3.0 |
dankolbman/NumericalAnalysis | Homeworks/HW2/Problem3.py | 1 | 1217 | import math
import copy
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
font = {'family' : 'normal',
'size' : 24}
rc('font', **font)
def interp(t, xpts, ypts):
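    # De Casteljau's algorithm: repeatedly replace neighbouring control points
    # by their convex combination at parameter t; after len(xpts)-1 passes,
    # (x[0], y[0]) is the point on the Bezier curve at t.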
x = copy.deepcopy(xpts)
y = copy.deepcopy(ypts)
# Iterate through the first N-1 points
for i in range(1,len(xpts)):
for j in range(0,len(xpts)-1):
x[j] = (1-t)*x[j] + t*x[j+1]
y[j] = (1-t)*y[j] + t*y[j+1]
return x[0], y[0]
xt = [ (1+6*(t/100)**2+2*(t/100)**3) for t in range(0, 100) ]
yt = [ (1-(t/100)+(t/100)**3) for t in range(0, 100) ]
xpts = [ 1, 1, 3, 9 ]
ypts = [ 1, 2/3, 1/3, 1 ]
x = []
y = []
for t in range(0,1000):
x1, y1 = interp(t/1000, xpts, ypts)
x.append(x1)
y.append(y1)
plt.plot(xt, yt, 'r', label='Parametric Curve', linewidth=10, alpha=0.5)
plt.plot(x, y, 'b', label='Bezier Curve', linewidth=3, alpha=1.0)
plt.plot(xpts, ypts, 'ko-', label='Control Points', markersize=10)
plt.xlim( [-1, 10] )
plt.ylim( [0.2, 1.2] )
plt.legend(loc=9, fontsize=16)
plt.savefig('Problem3.png')
plt.show()
| mit |
IndraVikas/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
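    # The OVA boundary for class c is coef[c, 0]*x0 + coef[c, 1]*x1 + intercept[c] = 0,
    # i.e. x1 = -(coef[c, 0]*x0 + intercept[c]) / coef[c, 1]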
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
arjoly/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float64)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
janpipek/boadata | boadata/data/xarray_types.py | 1 | 3953 | from typing import List, Tuple, Optional
import xarray as xr
from boadata.core import DataObject
from .mixins import (
CopyableMixin,
GetItemMixin,
NumericalMixin,
SetItemMixin,
StatisticsMixin,
)
# TODO: Limit arithmetic operations where the coordinates don't match
class _XarrayBase(
DataObject, GetItemMixin, StatisticsMixin, NumericalMixin, CopyableMixin
):
@property
def axes(self) -> List[str]:
return list(self.inner_data.coords.keys())
def __to_pandas_data_frame__(self):
return DataObject.from_native(self.inner_data.to_dataframe())
class XarrayDatasetBase(_XarrayBase, SetItemMixin):
@property
def shape(self) -> Tuple[int, ...]:
# TODO: This is probably completely wrong!!!
return (len(self.axes),) + self.inner_data[self.columns[0]].shape
@property
def columns(self):
return list(self.inner_data.data_vars.keys())
def add_column(self, key, expression):
if isinstance(expression, str):
try:
result = self.evaluate(expression, wrap=False)
self.inner_data = self.inner_data.merge({key: (self.axes, result)})
except:
raise RuntimeError("Error when evaluating {0}".format(expression))
else:
raise RuntimeError("Cannot add column {0} from {1}".format(key, expression))
return self
def _safe_rename(self, a_dict):
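        # Rename through a unique temporary prefix so that a new name which
        # already exists among the current axes/columns cannot collide with
        # an old one mid-rename.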
safe_prefix = "safe" + "_".join(
self.axes + self.columns + list(a_dict.values())
)
dict1 = {key: safe_prefix + value for key, value in a_dict.items()}
dict2 = {safe_prefix + value: value for _, value in a_dict.items()}
self.inner_data = self.inner_data.rename(dict1).rename(dict2)
def rename_columns(self, col_dict):
if any((col not in self.columns for col in col_dict.keys())):
raise RuntimeError("Column not present")
else:
self._safe_rename(col_dict)
def rename_axes(self, ax_dict):
if any((col not in self.axes for col in ax_dict.keys())):
raise RuntimeError("Column not present")
else:
self._safe_rename(ax_dict)
def __repr__(self):
return "{0}({1} -> {2}, shape={3})".format(
self.__class__.__name__,
", ".join(self.axes),
", ".join(self.columns),
self.shape,
)
real_type = xr.Dataset
class XarrayDataArrayBase(_XarrayBase):
real_type = xr.DataArray
def __repr__(self):
return "{0}({1}, shape={2}, dtype={3})".format(
self.__class__.__name__, ", ".join(self.axes), self.shape, self.dtype
)
@property
def dtype(self):
return self.inner_data.data.dtype
def __to_numpy_array__(self):
return DataObject.from_native(self.inner_data.data)
@DataObject.register_type(default=True)
class XarrayDataset(XarrayDatasetBase):
type_name = "xarray_dataset"
@DataObject.register_type(default=True)
class XarrayDataArray(XarrayDataArrayBase):
type_name = "xarray_data_array"
@classmethod
def from_native(cls, native_object, **kwargs):
if not native_object.ndim:
return native_object.dtype.type(native_object)
else:
if isinstance(native_object, XarrayDataArrayBase):
return native_object.convert(cls.type_name, **kwargs)
return cls(inner_data=native_object, **kwargs)
@classmethod
def __from_pandas_data_frame__(cls, origin: "boadata.data.PandasDataFrame", value_column: Optional[str] = None) -> "XarrayDataArray":
if not value_column:
value_column = origin.columns[-1]
axis_columns = [column for column in origin.columns if column != value_column]
df = origin.inner_data.set_index(axis_columns)
data = xr.Dataset.from_dataframe(df)[value_column]
return cls(inner_data=data, source=origin)
| mit |
RomainBrault/scikit-learn | sklearn/ensemble/voting_classifier.py | 11 | 11341 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals.joblib import Parallel, delayed
from ..utils.validation import has_fit_parameter, check_is_fitted
from ..utils.metaestimators import _BaseComposition
def _parallel_fit_estimator(estimator, X, y, sample_weight):
"""Private function used to fit an estimator within a job."""
if sample_weight is not None:
estimator.fit(X, y, sample_weight)
else:
estimator.fit(X, y)
return estimator
class VotingClassifier(_BaseComposition, ClassifierMixin, TransformerMixin):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <voting_classifier>`.
Parameters
----------
estimators : list of (string, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to `None` using
``set_params``.
voting : str, {'hard', 'soft'} (default='hard')
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like, shape = [n_classifiers], optional (default=`None`)
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for ``fit``.
If -1, then the number of jobs is set to the number of cores.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not `None`.
classes_ : array-like, shape = [n_predictions]
The classes labels.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1])
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>>
"""
def __init__(self, estimators, voting='hard', weights=None, n_jobs=1):
self.estimators = estimators
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
@property
def named_estimators(self):
return dict(self.estimators)
def fit(self, X, y, sample_weight=None):
""" Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
"""
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError('Multilabel and multi-output'
' classification is not supported.')
if self.voting not in ('soft', 'hard'):
raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
% self.voting)
if self.estimators is None or len(self.estimators) == 0:
raise AttributeError('Invalid `estimators` attribute, `estimators`'
' should be a list of (string, estimator)'
' tuples')
if (self.weights is not None and
len(self.weights) != len(self.estimators)):
raise ValueError('Number of classifiers and weights must be equal'
'; got %d weights, %d estimators'
% (len(self.weights), len(self.estimators)))
if sample_weight is not None:
for name, step in self.estimators:
if not has_fit_parameter(step, 'sample_weight'):
raise ValueError('Underlying estimator \'%s\' does not'
' support sample weights.' % name)
names, clfs = zip(*self.estimators)
self._validate_names(names)
n_isnone = np.sum([clf is None for _, clf in self.estimators])
if n_isnone == len(self.estimators):
raise ValueError('All estimators are None. At least one is '
'required to be a classifier!')
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
self.estimators_ = []
transformed_y = self.le_.transform(y)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_parallel_fit_estimator)(clone(clf), X, transformed_y,
sample_weight)
for clf in clfs if clf is not None)
return self
@property
def _weights_not_none(self):
"""Get the weights of not `None` estimators"""
if self.weights is None:
return None
return [w for est, w in zip(self.estimators,
self.weights) if est[1] is not None]
def predict(self, X):
""" Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
maj : array-like, shape = [n_samples]
Predicted class labels.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(
lambda x: np.argmax(
np.bincount(x, weights=self._weights_not_none)),
axis=1, arr=predictions.astype('int'))
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _predict_proba(self, X):
"""Predict class probabilities for X in 'soft' voting """
if self.voting == 'hard':
raise AttributeError("predict_proba is not available when"
" voting=%r" % self.voting)
check_is_fitted(self, 'estimators_')
avg = np.average(self._collect_probas(X), axis=0,
weights=self._weights_not_none)
return avg
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
avg : array-like, shape = [n_samples, n_classes]
Weighted average probability for each class per sample.
"""
return self._predict_proba
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
If `voting='soft'`:
array-like = [n_classifiers, n_samples, n_classes]
Class probabilities calculated by each classifier.
If `voting='hard'`:
array-like = [n_samples, n_classifiers]
Class labels predicted by each classifier.
"""
check_is_fitted(self, 'estimators_')
if self.voting == 'soft':
return self._collect_probas(X)
else:
return self._predict(X)
def set_params(self, **params):
""" Setting the parameters for the voting classifier
Valid parameter keys can be listed with get_params().
Parameters
----------
params: keyword arguments
Specific parameters using e.g. set_params(parameter_name=new_value)
In addition, to setting the parameters of the ``VotingClassifier``,
the individual classifiers of the ``VotingClassifier`` can also be
set or replaced by setting them to None.
Examples
--------
# In this example, the RandomForestClassifier is removed
clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
        eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)])
eclf.set_params(rf=None)
"""
super(VotingClassifier, self)._set_params('estimators', **params)
return self
def get_params(self, deep=True):
""" Get the parameters of the VotingClassifier
Parameters
----------
deep: bool
Setting it to True gets the various classifiers and the parameters
of the classifiers as well
"""
return super(VotingClassifier,
self)._get_params('estimators', deep=deep)
def _predict(self, X):
"""Collect results from clf.predict calls. """
return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
timdelbruegger/pyquaternion | demo.py | 1 | 3743 | """
This file is part of the pyquaternion python module
Author: Kieran Wynn
Website: https://github.com/KieranWynn/pyquaternion
Documentation: http://kieranwynn.github.io/pyquaternion/
Version: 1.0.0
License: The MIT License (MIT)
Copyright (c) 2015 Kieran Wynn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
demo.py - Demo of pyquaternion using matplotlib
"""
from quaternion import Quaternion
import numpy as np
from scipy import integrate
from math import pi
#import matplotlib
#matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
from matplotlib import animation
def generate_quaternion():
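    # Endlessly yield orientations: step through 20 interpolated quaternions
    # (Quaternion.intermediates) between one random unit quaternion and the
    # next, then pick a new random target and continue.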
q1 = Quaternion.random()
q2 = Quaternion.random()
while True:
for q in Quaternion.intermediates(q1, q2, 20, include_endpoints=True):
yield q
#q1, q2 = q2, q1
q1 = q2
q2 = Quaternion.random()
quaternion_generator = generate_quaternion()
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
#ax.axis('off')
# use a different color for each axis
colors = ['r', 'g', 'b']
# set up lines and points
lines = sum([ax.plot([], [], [], c=c)
for c in colors], [])
startpoints = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
endpoints = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
# prepare the axes limits
ax.set_xlim((-8, 8))
ax.set_ylim((-8, 8))
ax.set_zlim((-8, 8))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line in lines:
line.set_data([], [])
line.set_3d_properties([])
return lines
# animation function. This will be called sequentially with the frame number
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
#i = (2 * i) % x_t.shape[1]
q = next(quaternion_generator)
#print("q:", q)
for line, start, end in zip(lines, startpoints, endpoints):
#end *= 5
start = q.rotate(start)
end = q.rotate(end)
line.set_data([start[0], end[0]], [start[1], end[1]])
line.set_3d_properties([start[2], end[2]])
#pt.set_data(x[-1:], y[-1:])
#pt.set_3d_properties(z[-1:])
#ax.view_init(30, 0.6 * i)
fig.canvas.draw()
return lines
# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=500, interval=30, blit=False)
# Save as mp4. This requires mplayer or ffmpeg to be installed
#anim.save('lorentz_attractor.mp4', fps=15, extra_args=['-vcodec', 'libx264'])
plt.show() | mit |
kevin-intel/scikit-learn | examples/inspection/plot_permutation_importance_multicollinear.py | 5 | 5029 | """
=================================================================
Permutation Importance with Multicollinear or Correlated Features
=================================================================
In this example, we compute the permutation importance on the Wisconsin
breast cancer dataset using :func:`~sklearn.inspection.permutation_importance`.
The :class:`~sklearn.ensemble.RandomForestClassifier` can easily get about 97%
accuracy on a test dataset. Because this dataset contains multicollinear
features, the permutation importance will show that none of the features are
important. One approach to handling multicollinearity is by performing
hierarchical clustering on the features' Spearman rank-order correlations,
picking a threshold, and keeping a single feature from each cluster.
.. note::
See also
:ref:`sphx_glr_auto_examples_inspection_plot_permutation_importance.py`
"""
print(__doc__)
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import train_test_split
# %%
# Random Forest Feature Importance on Breast Cancer Data
# ------------------------------------------------------
# First, we train a random forest on the breast cancer dataset and evaluate
# its accuracy on a test set:
data = load_breast_cancer()
X, y = data.data, data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(X_train, y_train)
print("Accuracy on test data: {:.2f}".format(clf.score(X_test, y_test)))
# %%
# Next, we plot the tree based feature importance and the permutation
# importance. The permutation importance plot shows that permuting a feature
# drops the accuracy by at most `0.012`, which would suggest that none of the
# features are important. This is in contradiction with the high test accuracy
# computed above: some feature must be important. The permutation importance
# is calculated on the training set to show how much the model relies on each
# feature during training.
result = permutation_importance(clf, X_train, y_train, n_repeats=10,
random_state=42)
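# `result` is a Bunch: `result.importances` has shape (n_features, n_repeats),
# and `importances_mean` / `importances_std` summarise it per feature.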
perm_sorted_idx = result.importances_mean.argsort()
tree_importance_sorted_idx = np.argsort(clf.feature_importances_)
tree_indices = np.arange(0, len(clf.feature_importances_)) + 0.5
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
ax1.barh(tree_indices,
clf.feature_importances_[tree_importance_sorted_idx], height=0.7)
ax1.set_yticks(tree_indices)
ax1.set_yticklabels(data.feature_names[tree_importance_sorted_idx])
ax1.set_ylim((0, len(clf.feature_importances_)))
ax2.boxplot(result.importances[perm_sorted_idx].T, vert=False,
labels=data.feature_names[perm_sorted_idx])
fig.tight_layout()
plt.show()
# %%
# Handling Multicollinear Features
# --------------------------------
# When features are collinear, permuting one feature will have little
# effect on the model's performance because it can get the same information
# from a correlated feature. One way to handle multicollinear features is by
# performing hierarchical clustering on the Spearman rank-order correlations,
# picking a threshold, and keeping a single feature from each cluster. First,
# we plot a heatmap of the correlated features:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
corr = spearmanr(X).correlation
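# Note: the square Spearman correlation matrix is passed to `ward` directly, so
# each feature is clustered by its vector of correlations with all other features.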
corr_linkage = hierarchy.ward(corr)
dendro = hierarchy.dendrogram(
corr_linkage, labels=data.feature_names.tolist(), ax=ax1, leaf_rotation=90
)
dendro_idx = np.arange(0, len(dendro['ivl']))
ax2.imshow(corr[dendro['leaves'], :][:, dendro['leaves']])
ax2.set_xticks(dendro_idx)
ax2.set_yticks(dendro_idx)
ax2.set_xticklabels(dendro['ivl'], rotation='vertical')
ax2.set_yticklabels(dendro['ivl'])
fig.tight_layout()
plt.show()
# %%
# Next, we manually pick a threshold by visual inspection of the dendrogram
# to group our features into clusters and choose a feature from each cluster to
# keep, select those features from our dataset, and train a new random forest.
# The test accuracy of the new random forest did not change much compared to
# the random forest trained on the complete dataset.
cluster_ids = hierarchy.fcluster(corr_linkage, 1, criterion='distance')
cluster_id_to_feature_ids = defaultdict(list)
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_ids[cluster_id].append(idx)
selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
X_train_sel = X_train[:, selected_features]
X_test_sel = X_test[:, selected_features]
clf_sel = RandomForestClassifier(n_estimators=100, random_state=42)
clf_sel.fit(X_train_sel, y_train)
print("Accuracy on test data with features removed: {:.2f}".format(
clf_sel.score(X_test_sel, y_test)))
| bsd-3-clause |
helloTC/LearnPython | graph_proj/func_light.py | 1 | 7920 | import networkx as nx
import random
import operator
import numpy as np
from ATT.algorithm import surf_tools, tools
import matplotlib.pyplot as plt
from ATT.util import plotfig
from scipy import stats
from ATT.iofunc import iofiles
class light_smallwd(object):
def __init__(self, nodenum, neighk, p):
G = nx.watts_strogatz_graph(nodenum, neighk, p)
self._G = G
self._nodenum = nodenum
def get_graph(self):
"""
Get graph
"""
return self._G
def pointlight(self, strategy = 'degree'):
"""
Alternately grow a 'green' and a 'blue' region from seed nodes chosen by the
given strategy until every node in the small-world graph is claimed.
"""
green_collect = set()
blue_collect = set()
green_stablept = set()
blue_stablept = set()
allpt = set(range(self._nodenum))
restpt = allpt.difference(green_collect.union(blue_collect))
while len(restpt) != 0:
if strategy == 'degree':
green_sdpt = _extract_with_degree(self._G, restpt, option = 'descend')
elif strategy == 'random':
green_sdpt = random.choice(list(restpt))
elif strategy == 'hubvsrandom':
green_sdpt = _extract_with_degree(self._G, restpt)
elif strategy == 'hubvsworst':
green_sdpt = _extract_with_degree(self._G, restpt)
else:
raise Exception('Bad parameters')
green_stablept.add(green_sdpt)
green_collect.add(green_sdpt)
green_neigh = set(nx.neighbors(self._G, green_sdpt))
green_collect.update(green_neigh)
inter_collect = green_collect.intersection(blue_collect)
inter_collect.difference_update(green_stablept.union(blue_stablept))
blue_collect.difference_update(inter_collect)
restpt = allpt.difference(green_collect.union(blue_collect))
if len(restpt) == 0:
self.green_collect = green_collect
self.blue_collect = blue_collect
self.blue_stablept = blue_stablept
self.green_stablept = green_stablept
break
if strategy == 'degree':
blue_sdpt = _extract_with_degree(self._G, restpt, option = 'descend')
elif strategy == 'random':
blue_sdpt = random.choice(list(restpt))
elif strategy == 'hubvsrandom':
blue_sdpt = random.choice(list(restpt))
elif strategy == 'hubvsworst':
blue_sdpt = _extract_with_degree(self._G, restpt, option = 'ascend')
else:
raise Exception('Bad parameters')
blue_collect.add(blue_sdpt)
blue_stablept.add(blue_sdpt)
blue_neigh = set(nx.neighbors(self._G, blue_sdpt))
blue_collect.update(blue_neigh)
inter_collect = green_collect.intersection(blue_collect)
inter_collect.difference_update(green_stablept.union(blue_stablept))
green_collect.difference_update(inter_collect)
restpt = allpt.difference(green_collect.union(blue_collect))
self.green_collect = green_collect
self.blue_collect = blue_collect
self.green_stablept = green_stablept
self.blue_stablept = blue_stablept
def collect_diff(self):
return len(self.green_collect) - len(self.blue_collect)
def get_seedpt(self):
return self.green_stablept, self.blue_stablept
def get_collect(self):
return self.green_collect, self.blue_collect
def seed_degree(self):
return [self._G.degree()[i] for i in self.green_stablept], [self._G.degree()[i] for i in self.blue_stablept]
def total_seed_degree(self):
return np.sum(self.seed_degree()[0]), np.sum(self.seed_degree()[1])
def diff_degree(self):
return sum(self.seed_degree()[0]) - sum(self.seed_degree()[1])
def _extract_with_degree(G, restpt, option = 'descend'):
"""
"""
if option == 'descend':
rv = True
elif option == 'ascend':
rv = False
else:
    raise ValueError("option must be 'descend' or 'ascend'")
degree_pair = sorted(G.degree().items(), key=operator.itemgetter(1), reverse = rv)
degree_list = [i[0] for i in degree_pair]
index_degree_list = [i in list(restpt) for i in degree_list].index(1)
return degree_list[index_degree_list]
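# Hedged sketch (not part of the original module): the same degree-ranked
# selection can be illustrated with a plain {node: degree} dict, which avoids
# any dependency on the networkx version. `degree_map` is a hypothetical input.
def _example_pick_by_degree(degree_map, restpt, option='descend'):
    """Toy stand-in for _extract_with_degree using a plain dict."""
    ranked = sorted(degree_map.items(), key=operator.itemgetter(1),
                    reverse=(option == 'descend'))
    for node, _ in ranked:
        if node in restpt:
            return node
    return None
# Example: _example_pick_by_degree({0: 3, 1: 5, 2: 1}, {0, 2}) returns 0.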
if __name__ == '__main__':
# This part is to compute difference by strategy to choose seed point with largest degree
# nodenum = [20, 30, 50, 90, 150, 300]
# neighk_perct = np.arange(0.1, 1, 0.1)
# p = 0.2
# numdif_k = []
# numdif_n = []
# for node in nodenum:
# neighk = [int(i*node) for i in neighk_perct]
# for k in neighk:
# lscls = light_smallwd(node, k, p)
# lscls.pointlight('degree')
# numdif_k.append(lscls.collect_diff())
# numdif_n.append(numdif_k)
# numdif_k = []
# numdif_n = np.array(numdif_n)
# ---------------------------------------------------------
# This part is to compute difference in random situation
# nodenum = 150
# neighk = 110
# p = 0.2
# n_perm = 5000
# numdif = []
# lscls = light_smallwd(nodenum, neighk, p)
# for i in range(n_perm):
# lscls.pointlight('random')
# numdif.append(lscls.collect_diff())
# ---------------------------------------------------------
nodenum = 100
neighk = 6
p = 0.9
j = 1
while 1:
iter_time = 500
print('iteration {}'.format(j))
j+=1
lscls = light_smallwd(nodenum, neighk, p)
G = lscls.get_graph()
diff_hubvsrandom = []
diff_random = []
diff_random_degree = []
lscls.pointlight('hubvsworst')
diff_hubvsworst = lscls.collect_diff()
for i in range(iter_time):
# print('{}'.format(i))
lscls.pointlight('hubvsrandom')
diff_hubvsrandom.append(lscls.collect_diff())
lscls.pointlight('random')
diff_random.append(lscls.collect_diff())
diff_random_degree.append(lscls.diff_degree())
diff_hubvsrandom = np.array(diff_hubvsrandom)
diff_random = np.array(diff_random)
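# p_sig: fraction of hub-vs-random runs whose (green minus blue) difference
# exceeds the hub-vs-worst difference; it is used below as an empirical
# significance level for accepting this particular graph.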
p_sig = 1.0*len(diff_hubvsrandom[diff_hubvsrandom>diff_hubvsworst])/len(diff_hubvsrandom)
if (p_sig<0.01) & (len(diff_hubvsrandom[diff_hubvsrandom<0]) < 0.01*iter_time) & (np.min(diff_hubvsrandom)>-5):
break
G_edges = G.edges()
G_edges = [list(edge) for edge in G_edges]
gam_cls = surf_tools.GenAdjacentMatrix()
adjcent = gam_cls.from_edge(G.edges())
# Save files
iocsv_adjcent = iofiles.make_ioinstance('N100_P9_K6_G10_adj.csv')
iocsv_edges = iofiles.make_ioinstance('N100_P9_K6_G10_edges.csv')
iocsv_adjcent.save(adjcent)
iocsv_edges.save(np.array(G_edges))
G_degree = np.array(G.degree().values())
largenode = np.argsort(G_degree)[-6:]
pos = nx.random_layout(G)
nx.draw(G, pos, node_color = 'r')
plt.show()
plt.figure()
nx.draw(G, pos, node_color = 'r')
nx.draw_networkx_nodes(G, pos, nodelist = largenode.tolist(), node_color = 'b')
plt.show()
m = surf_tools.GenAdjacentMatrix()
adjmatrix = m.from_edge(G.edges())
plotmat = plotfig.make_figfunction('mat')
plotmat(adjmatrix)
plt.figure()
plt.hist(G_degree)
plt.show()
plotviolin = plotfig.make_figfunction('violin')
diff_data = np.concatenate((np.expand_dims(diff_hubvsrandom,axis=-1).T, np.expand_dims(diff_random,axis=-1).T))
plotviolin(diff_data.T, xticklabels = ['StrategyVsRandom', 'Random'])
plothist = plotfig.make_figfunction('hist')
plothist(diff_hubvsrandom, [], diff_hubvsworst, p_sig)
plotcorr = plotfig.make_figfunction('corr')
plotcorr(np.array(diff_random), np.array(diff_random_degree), ['Number of lights', 'difference of degree'])
| mit |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/tools/plotting.py | 1 | 28495 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.tools.plotting Contains convenient plotting functions.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import standard modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from skimage import exposure
# Import astronomical modules
from astropy.visualization import SqrtStretch, LogStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from astropy.visualization import MinMaxInterval, ZScaleInterval
from photutils import CircularAperture
# -----------------------------------------------------------------
colours = ['Pink','LightPink','HotPink','DeepPink','PaleVioletRed','MediumVioletRed','Red','LightSalmon','Salmon',
'DarkSalmon','LightCoral','IndianRed','Crimson','FireBrick','DarkRed','Red','Orange','OrangeRed','Tomato',
'Coral','DarkOrange','Orange','Gold','Yellow','Yellow','LightYellow','LemonChiffon','LightGoldenrodYellow',
'PapayaWhip','Moccasin','PeachPuff','PaleGoldenrod','Khaki','DarkKhaki','Brown','Cornsilk','BlanchedAlmond',
'Bisque','NavajoWhite','Wheat','BurlyWood','Tan','RosyBrown','SandyBrown','Goldenrod','DarkGoldenrod','Peru',
'Chocolate','SaddleBrown','Sienna','Brown','Maroon','DarkOliveGreen','Olive','OliveDrab','YellowGreen','LimeGreen',
'Lime','LawnGreen','Chartreuse','GreenYellow','SpringGreen','MediumSpringGreen','LightGreen','PaleGreen','DarkSeaGreen',
'MediumSeaGreen','SeaGreen','ForestGreen','Green','DarkGreen','Cyan','MediumAquamari','Aqua','Cyan','LightCyan',
'PaleTurquoise','Aquamarine','Turquoise','MediumTurquois','DarkTurquoise','LightSeaGreen','CadetBlue','DarkCyan',
'Teal','LightSteelBlue','PowderBlue','LightBlue','SkyBlue','LightSkyBlue','DeepSkyBlue','DodgerBlue',
'CornflowerBlue','SteelBlue','RoyalBlue','Blue','MediumBlue','DarkBlue','Navy','MidnightBlue','Lavender',
'Thistle','Plum','Violet','Orchid','Fuchsia','Magenta','MediumOrchid','MediumPurple','BlueViolet','DarkViolet',
'DarkOrchid','DarkMagenta','Purple','Indigo','DarkSlateBlue','SlateBlue','MediumSlateBlue','White','Snow',
'Honeydew','MintCream','Azure','AliceBlue','GhostWhite','WhiteSmoke','Seashell','Beige','OldLace','FloralWhite',
'Ivory','AntiqueWhite','Linen','LavenderBlush','MistyRose','Gainsboro','LightGray','Silver','DarkGray','Gray',
'DimGray','LightSlateGray','SlateGray','DarkSlateGray','Black']
pretty_colours = ["r", "dodgerblue", "purple", "darkorange", "lawngreen", "yellow", "darkblue", "teal", "darkgreen", "lightcoral", "crimson", "saddlebrown"]
filled_markers = ['o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd']
line_styles = ['-', '--', '-.', ':']
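# Hedged usage sketch (not part of the original module): the palettes above are
# plain lists, so one simple way to consume them is to cycle through colours and
# line styles when plotting many curves. `series_list` is a hypothetical list of
# 1-D arrays.
def _example_cycle_palettes(series_list):
    from itertools import cycle
    for series, colour, style in zip(series_list, cycle(pretty_colours), cycle(line_styles)):
        plt.plot(series, color=colour, linestyle=style)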
# -----------------------------------------------------------------
def plot_mask(mask, title=None, path=None, format=None):
"""
This function ...
:param mask:
:param title:
:param path:
:param format:
:return:
"""
# Make the plot
plt.figure(figsize=(7,7))
plt.imshow(mask, origin="lower", interpolation="nearest", cmap='Greys')
plt.xlim(0, mask.shape[1] - 1)
plt.ylim(0, mask.shape[0] - 1)
if title is not None: plt.title(title)
else: plt.title("Black means True")
if path is None: plt.show()
else: plt.savefig(path, format=format)
plt.close()
# -----------------------------------------------------------------
def plot_box(box, title=None, path=None, format=None, scale="log", interval="pts", cmap="viridis"):
"""
This function ...
:param box:
:param title:
:param path:
:param format:
:param scale:
:param interval:
:param cmap:
:return:
"""
# Other new colormaps: plasma, magma, inferno
# Normalization
if scale == "log": norm = ImageNormalize(stretch=LogStretch())
elif scale == "sqrt": norm = ImageNormalize(stretch=SqrtStretch())
#elif scale == "skimage": norm = exposure.equalize_hist
else: raise ValueError("Invalid option for 'scale'")
if interval == "zscale":
vmin, vmax = ZScaleInterval().get_limits(box)
elif interval == "pts":
# Determine the maximum value in the box and the minimum value for plotting
vmin = max(np.nanmin(box), 0.)
vmax = 0.5 * (np.nanmax(box) + vmin)
elif isinstance(interval, tuple):
vmin = interval[0]
vmax = interval[1]
else: raise ValueError("Invalid option for 'interval'")
#if scale == "skimage":
# vmin = 0.0
# vmax = 1.0
# Make the plot
plt.figure(figsize=(7,7))
plt.imshow(box, origin="lower", interpolation="nearest", vmin=vmin, vmax=vmax, norm=norm, cmap=cmap)
plt.xlim(0, box.shape[1]-1)
plt.ylim(0, box.shape[0]-1)
if title is not None: plt.title(title)
if path is None: plt.show()
else: plt.savefig(path, format=format)
plt.close()
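# Hedged usage sketch (not part of the original module), assuming a 2-D numpy
# array of non-negative values; pass path="box.png" to save instead of showing.
def _example_plot_box_usage():
    data = np.random.rand(50, 50)
    plot_box(data, title="random box", scale="log", interval="pts", cmap="viridis")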
# -----------------------------------------------------------------
def plot_peak_model(box, x_peak, y_peak, model, title=None, vmin=None, vmax=None):
"""
This function ...
:param box:
:param x_peak:
:param y_peak:
:param model:
:return:
"""
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(box), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(box) + vmin)
# Create x and y meshgrid for plotting
y_plotvalues, x_plotvalues = np.mgrid[:box.shape[0], :box.shape[1]]
x_peak_pixel = int(round(x_peak))
y_peak_pixel = int(round(y_peak))
# Calculate the pixel value at the peak for the data, model and residual
peak_data_value = box[y_peak_pixel,x_peak_pixel]
peak_model_value = model(x_peak, y_peak)
peak_residual_value = peak_data_value - peak_model_value
# Plot the data with the best-fit model
plt.figure(figsize=(10,3))
plt.subplot(1,3,1)
plt.imshow(box, origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax, cmap="viridis")
plt.plot(x_peak, y_peak, ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
plt.xlim(0, box.shape[1]-1)
plt.ylim(0, box.shape[0]-1)
plt.title("Data " + str(peak_data_value))
plt.subplot(1,3,2)
plt.imshow(model(x_plotvalues, y_plotvalues), origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Model " + str(peak_model_value))
plt.subplot(1,3,3)
plt.imshow(box - model(x_plotvalues, y_plotvalues), origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Residual " + str(peak_residual_value))
# Set the main title
if title is not None: plt.suptitle(title, size=16)
# Show the plot
plt.show()
# -----------------------------------------------------------------
def plot_star(box, peak, model, title=None, vmin=None, vmax=None):
"""
This function ...
:param box:
:param peak:
:param model:
:param title:
:return:
"""
# Normalization
norm = ImageNormalize(stretch=SqrtStretch())
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(box), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(box) + vmin)
# Evaluate the model and subtract it from the cutout
evaluated = box.evaluate_model(model)
subtracted = box - evaluated
# Create a figure
plt.figure(figsize=(10,3))
# Plot the box
plt.subplot(1,4,1)
plt.imshow(box, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.plot(peak.x, peak.y, ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
plt.xlim(0, box.xsize-1)
plt.ylim(0, box.ysize-1)
plt.title("Cutout")
# Plot the model
plt.subplot(1,4,2)
plt.imshow(evaluated, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.xlim(0, box.xsize-1)
plt.ylim(0, box.ysize-1)
plt.title("Model")
# Plot the subtracted box on the same scale as the original box and model
plt.subplot(1,4,3)
plt.imshow(subtracted, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.xlim(0, box.xsize-1)
plt.ylim(0, box.ysize-1)
plt.title("Residual")
# Plot the subtracted box on a narrower color scale
plt.subplot(1,4,4)
sp = plt.imshow(subtracted, origin='lower', interpolation="nearest", cmap="viridis")
plt.xlim(0, box.xsize-1)
plt.ylim(0, box.ysize-1)
plt.title("Residual")
plt.colorbar(sp, format="%.2f")
# Set the main title
if title is not None: plt.suptitle(title, size=16)
# Show the plot
plt.show()
# -----------------------------------------------------------------
def plot_peaks(box, x_peaks, y_peaks, radius=None, title=None, vmin=None, vmax=None):
"""
This function plots the data with peaks marked ...
:param box:
:param x_peaks:
:param y_peaks:
:return:
"""
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(box), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(box) + vmin)
# Set the normalization
norm = ImageNormalize(stretch=SqrtStretch())
# Make the plot
plt.figure(figsize=(8,2.5))
plt.imshow(box, origin='lower', norm=norm, interpolation='nearest', vmin=vmin, vmax=vmax, cmap="viridis")
if radius is None: plt.plot(x_peaks, y_peaks, ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
else:
positions = (x_peaks, y_peaks)
apertures = CircularAperture(positions, r=radius)
apertures.plot(color='green', lw=1.5, alpha=0.5)
plt.xlim(0, box.shape[1]-1)
plt.ylim(0, box.shape[0]-1)
if title is not None: plt.title(title)
plt.show()
# -----------------------------------------------------------------
def plot_peak(box, x_peak, y_peak, radius=None, title=None):
"""
This function plots the data with peaks marked ...
:param box:
:param x_peak:
:param y_peak:
:return:
"""
plot_peaks(box, [x_peak], [y_peak], radius=radius, title=title)
# -----------------------------------------------------------------
def plot_peaks_models(box, x_peaks, y_peaks, models, vmin=None, vmax=None):
"""
This function plots the data with peaks marked and models subtracted
:param box:
:param x_peaks:
:param y_peaks:
:param models:
:return:
"""
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(box), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(box) + vmin)
# Create x and y meshgrid for plotting
y_plotvalues, x_plotvalues = np.mgrid[:box.shape[0], :box.shape[1]]
# Calculate the sum of all models
total_model = models[0]
for i in range(1, len(models)): total_model += models[i]
# Make the plot
plt.figure(figsize=(8,2.5))
plt.subplot(1,3,1)
plt.imshow(box, origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax, cmap="viridis")
plt.plot(x_peaks, y_peaks, ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
plt.xlim(0, box.shape[1]-1)
plt.ylim(0, box.shape[0]-1)
plt.title("Data")
plt.subplot(1,3,2)
plt.imshow(total_model(x_plotvalues, y_plotvalues), origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Model")
plt.subplot(1,3,3)
plt.imshow(box - total_model(x_plotvalues, y_plotvalues), origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Residual")
plt.show()
# -----------------------------------------------------------------
def plot_star_model(background, background_clipped, est_background, star, est_background_star, fitted_star, vmin=None, vmax=None):
"""
This function ...
:param background:
:param background_clipped:
:param est_background:
:param star:
:param est_background_star:
:param fitted_star:
:return:
"""
norm = ImageNormalize(stretch=SqrtStretch())
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(background), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(background) + vmin)
# Plot the data with the best-fit model
plt.figure(figsize=(20,3))
plt.subplot(1,7,1)
plt.imshow(background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, background.shape[1]-1)
plt.ylim(0, background.shape[0]-1)
plt.title("Background")
plt.subplot(1,7,2)
plt.imshow(background_clipped, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, background_clipped.shape[1]-1)
plt.ylim(0, background_clipped.shape[0]-1)
plt.title("Sigma-clipped background")
plt.subplot(1,7,3)
plt.imshow(est_background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, est_background.shape[1]-1)
plt.ylim(0, est_background.shape[0]-1)
plt.title("Estimated background")
plt.subplot(1,7,4)
plt.imshow(star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, star.shape[1]-1)
plt.ylim(0, star.shape[0]-1)
plt.title("Star")
plt.subplot(1,7,5)
plt.imshow(star.data - est_background_star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, star.shape[1]-1)
plt.ylim(0, star.shape[0]-1)
plt.title("Star without background")
plt.subplot(1,7,6)
plt.imshow(fitted_star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, fitted_star.shape[1]-1)
plt.ylim(0, fitted_star.shape[0]-1)
plt.title("Fitted star")
plt.subplot(1,7,7)
plt.imshow(star.data - fitted_star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, star.shape[1]-1)
plt.ylim(0, star.shape[0]-1)
plt.title("Residual")
plt.show()
# -----------------------------------------------------------------
def plot_removal(cutout, mask, background, removed, title=None, vmin=None, vmax=None):
"""
This function ...
:param cutout:
:param mask:
:param background:
:param removed:
:param title:
:return:
"""
norm = ImageNormalize(stretch=SqrtStretch())
# Determine the maximum value in the box and the minimum value for plotting
if vmin is None: vmin = max(np.nanmin(cutout), 0.)
if vmax is None: vmax = 0.5 * (np.nanmax(cutout) + vmin)
# Plot the data with the best-fit model
plt.figure(figsize=(20,3))
plt.subplot(1,4,1)
plt.imshow(cutout, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, cutout.xsize-0.5)
plt.ylim(-0.5, cutout.ysize-0.5)
plt.title("Cutout")
plt.subplot(1,4,2)
plt.imshow(np.ma.masked_array(cutout, mask=mask), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, cutout.xsize-0.5)
plt.ylim(-0.5, cutout.ysize-0.5)
plt.title("Background mask")
plt.subplot(1,4,3)
plt.imshow(background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, background.xsize-0.5)
plt.ylim(-0.5, background.ysize-0.5)
plt.title("Estimated background")
plt.subplot(1,4,4)
plt.imshow(removed, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, background.xsize-0.5)
plt.ylim(-0.5, background.ysize-0.5)
plt.title("Cutout with star removed")
# Set the main title
if title is not None: plt.suptitle(title, size=16)
# Show the plot
plt.show()
# -----------------------------------------------------------------
def plot_source(cutout, mask, background, peaks=None, title=None, show=True, scale="log", frame=None, vmin=None, vmax=None):
"""
This function ...
:param cutout:
:param mask:
:param background:
:param peaks:
:param title:
:param show:
:param scale:
:param frame:
:param vmin:
:param vmax:
:return:
"""
if scale == "sqrt": norm = ImageNormalize(stretch=SqrtStretch())
elif scale == "log": norm = ImageNormalize(stretch=LogStretch())
else: raise ValueError("Invalid scale option")
if frame is not None:
gs1 = gridspec.GridSpec(3, 3)
gs1.update(left=0.05, right=0.48, wspace=0.05)
ax1 = plt.subplot(gs1[:-1, :])
ax2 = plt.subplot(gs1[-1, :-1])
ax3 = plt.subplot(gs1[-1, -1])
# Determine the maximum value in the box and the minimum value for plotting
#vmax = np.nanmax(cutout)
#vmin = np.nanmin(cutout) if vmax <= 0 else 0.0
if vmin is None: vmin = np.nanmin(cutout)
if vmax is None: vmax = 0.5 * (np.nanmax(cutout) + vmin)
#number = 6 if source_mask is not None else 5
number = 5
# Plot the data with the best-fit model
plt.figure(figsize=(20,3))
plt.subplot(1,number,1)
plt.imshow(cutout, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, cutout.xsize-0.5)
plt.ylim(-0.5, cutout.ysize-0.5)
plt.title("Cutout")
plt.subplot(1,number,2)
plt.imshow(np.ma.masked_array(cutout, mask=mask), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, cutout.xsize-0.5)
plt.ylim(-0.5, cutout.ysize-0.5)
plt.title("Masked source")
plt.subplot(1,number,3)
plt.imshow(background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, background.xsize-0.5)
plt.ylim(-0.5, background.ysize-0.5)
plt.title("Estimated background")
#plt.subplot(1,number,4)
#plt.imshow(np.ma.masked_array(cutout, mask=mask.inverse()), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax)
#plt.xlim(-0.5, cutout.xsize-0.5)
#plt.ylim(-0.5, cutout.ysize-0.5)
#plt.title("Masked background")
plt.subplot(1,number,4)
plt.imshow(cutout-background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
if peaks is not None: plt.plot(peaks[0], peaks[1], ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
plt.xlim(-0.5, cutout.xsize-0.5)
plt.ylim(-0.5, cutout.ysize-0.5)
plt.title("Background subtracted")
#plt.subplot(1,number,6)
#plt.imshow(np.ma.masked_array(cutout-background, mask=mask.inverse()), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax)
#plt.xlim(-0.5, cutout.xsize-0.5)
#plt.ylim(-0.5, cutout.ysize-0.5)
#plt.title("Background subtracted source")
replaced = cutout.copy()
replaced[mask] = background[mask]
plt.subplot(1,number,5)
plt.imshow(replaced, origin="lower", interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(-0.5, cutout.xsize - 0.5)
plt.ylim(-0.5, cutout.ysize - 0.5)
plt.title("Removed source (replaced by background)")
# Set the main title
if title is not None: plt.suptitle(title, size=16)
# Show the plot
if show: plt.show()
# -----------------------------------------------------------------
def plot_background_subtraction(background, background_clipped, est_background, star, est_background_star):
"""
This function ...
:param background:
:param background_clipped:
:param est_background:
:param star:
:param est_background_star:
:return:
"""
norm = ImageNormalize(stretch=SqrtStretch())
# Determine the maximum value in the box and the minimum value for plotting
vmax = np.nanmax(background)
vmin = np.nanmin(background) if vmax <= 0 else 0.0
# Plot the data with the best-fit model
plt.figure(figsize=(20,3))
plt.subplot(1,5,1)
plt.imshow(background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, background.shape[1]-1)
plt.ylim(0, background.shape[0]-1)
plt.title("Background")
plt.subplot(1,5,2)
plt.imshow(background_clipped, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, background_clipped.shape[1]-1)
plt.ylim(0, background_clipped.shape[0]-1)
plt.title("Sigma-clipped background")
plt.subplot(1,5,3)
plt.imshow(est_background, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, est_background.shape[1]-1)
plt.ylim(0, est_background.shape[0]-1)
plt.title("Estimated background")
plt.subplot(1,5,4)
plt.imshow(star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, star.shape[1]-1)
plt.ylim(0, star.shape[0]-1)
plt.title("Star")
plt.subplot(1,5,5)
plt.imshow(star.data - est_background_star, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, star.shape[1]-1)
plt.ylim(0, star.shape[0]-1)
plt.title("Star without background")
plt.show()
# -----------------------------------------------------------------
def plot_background_center(cutout, mask, peaks=None, title=None, show=True, scale="sqrt"):
"""
This function ...
:param cutout:
:param mask:
:param peaks:
:param title:
:param show:
:param scale:
:return:
"""
if scale == "sqrt": norm = ImageNormalize(stretch=SqrtStretch())
elif scale == "log": norm = ImageNormalize(stretch=LogStretch())
else: raise ValueError("Invalid scale option")
# Determine the maximum value in the box and the minimum value for plotting
vmax = np.nanmax(cutout)
vmin = np.nanmin(cutout) if vmax <= 0 else 0.0
# Plot the data with the best-fit model
plt.figure(figsize=(10,4))
plt.subplot(1,3,1)
plt.imshow(cutout, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0.5, cutout.xsize-0.5)
plt.ylim(0.5, cutout.ysize-0.5)
plt.title("Cutout")
plt.subplot(1,3,2)
plt.imshow(np.ma.masked_array(cutout, mask=mask), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0.5, cutout.xsize-0.5)
plt.ylim(0.5, cutout.ysize-0.5)
plt.title("Masked source")
plt.subplot(1,3,3)
plt.imshow(np.ma.masked_array(cutout, mask=mask.inverse()), origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
if peaks is not None: plt.plot(peaks[0], peaks[1], ls='none', color='white', marker='+', ms=40, lw=10, mew=4)
plt.xlim(0.5, cutout.xsize-0.5)
plt.ylim(0.5, cutout.ysize-0.5)
plt.title("Masked background")
# Set the main title
if title is not None: plt.suptitle(title, size=16)
# Show the plot
if show: plt.show()
# -----------------------------------------------------------------
def plot_difference(box_a, box_b, share_colorscale=False, title=None):
"""
This function ...
:param box_a:
:param box_b:
:param share_colorscale:
:return:
"""
#norm = ImageNormalize(stretch=SqrtStretch())
norm = ImageNormalize(stretch=LogStretch())
# Determine the maximum value in the box and the minimum value for plotting
#vmax = np.nanmax(box_a)
#vmin = np.nanmin(box_a) if vmax <= 0 else 0.0
vmin = np.nanmin(box_a)
vmax = 0.5 * (np.nanmax(box_b) + vmin)
# Plot the data with the best-fit model
plt.figure(figsize=(8,2.5))
plt.subplot(1,3,1)
#plt.imshow(box_a, origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax)
plt.imshow(box_a, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, box_a.shape[1]-1)
plt.ylim(0, box_a.shape[0]-1)
plt.title("Data a")
plt.subplot(1,3,2)
#plt.imshow(box_b, origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax)
plt.imshow(box_b, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.xlim(0, box_a.shape[1]-1)
plt.ylim(0, box_a.shape[0]-1)
plt.title("Data b")
plt.subplot(1,3,3)
if share_colorscale:
plt.imshow(box_a - box_b, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.xlim(0, box_a.shape[1]-1)
plt.ylim(0, box_a.shape[0]-1)
plt.title("Residual")
#plt.imshow(box_a - box_b, origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax)
else:
residualimage = plt.imshow(box_a - box_b, origin='lower', interpolation="nearest", cmap="viridis")
plt.xlim(0, box_a.shape[1]-1)
plt.ylim(0, box_a.shape[0]-1)
plt.title("Residual")
plt.colorbar(residualimage, format="%.2f")
# Set the main title
if title is not None: plt.suptitle(title, size=16)
plt.show()
# -----------------------------------------------------------------
def plot_difference_value(box, value, share_colorscale=False):
"""
This function ...
:param box:
:param value:
:param share_colorscale:
:return:
"""
norm = ImageNormalize(stretch=SqrtStretch())
# Determine the maximum value in the box and the minimum value for plotting
vmax = np.nanmax(box)
vmin = np.nanmin(box) if vmax <= 0 else 0.0
# Plot the data with the best-fit model
plt.figure(figsize=(8,2.5))
plt.subplot(1,3,1)
#plt.imshow(box_a, origin='lower', interpolation='nearest', vmin=vmin, vmax=vmax)
plt.imshow(box, origin='lower', interpolation="nearest", norm=norm, vmin=vmin, vmax=vmax, cmap="viridis")
plt.xlim(0, box.shape[1]-1)
plt.ylim(0, box.shape[0]-1)
plt.title("Data")
plt.subplot(1,3,2)
#plt.imshow(box_b, origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax)
value_box = np.full(box.shape, value)
#print np.median(box-value_box)
plt.imshow(value_box, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Constant value")
plt.subplot(1,3,3)
if share_colorscale:
plt.imshow(box - value_box, origin='lower', interpolation="nearest", norm=norm, vmin=0.0, vmax=vmax, cmap="viridis")
plt.title("Residual")
#plt.imshow(box_a - box_b, origin='lower', interpolation='nearest', vmin=0.0, vmax=vmax)
else:
residualimage = plt.imshow(box - value_box, origin='lower', interpolation="nearest", cmap="viridis")
plt.title("Residual")
plt.colorbar(residualimage, format="%.2f")
plt.show()
# -----------------------------------------------------------------
def plot_difference_model(box, model):
"""
This function ...
:param box:
:param model:
:return:
"""
# Determine the maximum value in the box and the minimum value for plotting
vmax = np.nanmax(box)
vmin = np.nanmin(box) if vmax <= 0 else 0.0
# Create x and y meshgrid for plotting
y_plotvalues, x_plotvalues = np.mgrid[:box.shape[0], :box.shape[1]]
# Evaluate the model in the box
model_box = model(x_plotvalues, y_plotvalues)
# Plot the data with the best-fit model
plt.figure(figsize=(8,2.5))
plt.subplot(1,3,1)
plt.imshow(box, origin='lower', interpolation="nearest", vmin=vmin, vmax=vmax, cmap="viridis")
plt.title("Data")
plt.subplot(1,3,2)
plt.imshow(model_box, origin='lower', interpolation="nearest", vmin=vmin, vmax=vmax, cmap="viridis")
plt.title("Model")
plt.subplot(1,3,3)
plt.imshow(box - model_box, origin='lower', interpolation="nearest", vmin=vmin, vmax=vmax, cmap="viridis")
plt.title("Residual")
plt.show()
# -----------------------------------------------------------------
| mit |
dsm054/pandas | pandas/tests/plotting/test_frame.py | 3 | 123748 | # coding: utf-8
""" Test cases for DataFrame.plot """
import pytest
import string
import warnings
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.core.dtypes.api import is_list_like
from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy.random import rand, randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
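# TestPlotBase supplies the _check_* assertion helpers used throughout this file;
# _check_plot_works draws each plot twice (without and with an explicit `ax`),
# which is why several tests below expect a UserWarning from it.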
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
@pytest.mark.slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# mpl >= 1.5.2 (or slightly below) throws AttributeError
with pytest.raises((TypeError, AttributeError)):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
# axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
result = ax.axes
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
_check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=['red'])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0))
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color='')
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=['red', 'black'], style=['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ['-', '--']
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ['red', 'black']
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=['red', 'black'], style=['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
assert df.index.name == 'NAME'
@pytest.mark.slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'), df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1),
figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@pytest.mark.slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@pytest.mark.slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plotting.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)}, index=np.arange(99, -1, -1),
dtype=np.int64)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({'y': [0., 1., 2., 3.]}, index=[1., 0., 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., np.nan, 3., 4., 5., 6.]},
index=[1., 0., 3., 2., np.nan, 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., 2., 3.], 'z': [91., 90., 93., 92.]})
ax = df.plot(x='z', y='y')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
@pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3, )
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax,
labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
def test_groupby_boxplot_sharey(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14],
'b': [0.56, 0.84, 0.29, 0.56, 0.85],
'c': [0, 1, 2, 3, 1]},
index=[0, 1, 2, 3, 4])
# behavior without keyword
axes = df.groupby('c').boxplot()
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# set sharey=True should be identical
axes = df.groupby('c').boxplot(sharey=True)
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# sharey=False, all yticklabels should be visible
axes = df.groupby('c').boxplot(sharey=False)
expected = [True, True, True, True]
self._assert_ytickslabels_visibility(axes, expected)
def test_groupby_boxplot_sharex(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame({'a': [-1.43, -0.15, -3.70, -1.43, -0.14],
'b': [0.56, 0.84, 0.29, 0.56, 0.85],
'c': [0, 1, 2, 3, 1]},
index=[0, 1, 2, 3, 4])
# behavior without keyword
axes = df.groupby('c').boxplot()
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# set sharex=False should be identical
axes = df.groupby('c').boxplot(sharex=False)
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# sharex=True, xticklabels should be visible
# only for bottom plots
axes = df.groupby('c').boxplot(sharex=True)
expected = [False, False, True, True]
self._assert_xtickslabels_visibility(axes, expected)
@pytest.mark.slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45,
fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {"numeric": np.array([1, 2, 5]),
"timedelta": [pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h")],
"datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")],
"datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00",
utc=True),
pd.to_datetime("2017-08-02 00:00:00",
utc=True)],
"text": ["This", "should", "fail"]}
testdata = DataFrame(data)
ax_numeric = testdata.plot(y="numeric")
assert (ax_numeric.get_lines()[0].get_data()[1] ==
testdata["numeric"].values).all()
ax_timedelta = testdata.plot(y="timedelta")
assert (ax_timedelta.get_lines()[0].get_data()[1] ==
testdata["timedelta"].values).all()
ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_no_tz"].values).all()
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_all_tz"].values).all()
with pytest.raises(TypeError):
testdata.plot(y="text")
@pytest.mark.xfail(reason='not support for period, categorical, '
'datetime_mixed_tz',
strict=True)
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
because of the way pandas handles ``Series`` of
``datetime`` objects with different timezones;
generally, converting the ``datetime`` objects to a tz-aware
form could help with this problem
"""
data = {"numeric": np.array([1, 2, 5]),
"period": [pd.Period('2017-08-01 00:00:00', freq='H'),
pd.Period('2017-08-01 02:00', freq='H'),
pd.Period('2017-08-02 00:00:00', freq='H')],
"categorical": pd.Categorical(["c", "b", "a"],
categories=["a", "b", "c"],
ordered=False),
"datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")]}
testdata = pd.DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (ax_period.get_lines()[0].get_data()[1] ==
testdata["period"].values).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (ax_categorical.get_lines()[0].get_data()[1] ==
testdata["categorical"].values).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric",
y="datetime_mixed_tz")
assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_mixed_tz"].values).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
@pytest.mark.slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
# an invalid layout should not affect the input and return value
# (the warning is tested in
# TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@pytest.mark.slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# the '(right)' suffix is only appended to legend labels when subplots=False
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
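# helper: the y-data of each stacked line should equal the running sum of
# the corresponding unstacked lines' y-data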
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4), columns=['w', 'x', 'y', 'z'])
neg_df = -df
# each column has either all-positive or all-negative values
sep_df = DataFrame({'w': rand(6),
'x': rand(6),
'y': -rand(6),
'z': -rand(6)})
# each column has a mix of positive and negative values
mixed_df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with pytest.raises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1,
'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
# remove NaN for comparison purposes
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(
masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(
masked2.mask, np.array([False, True, False, False]))
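# stacked line/area plots fill NaN with 0 before accumulating, so the
# expected stacked values use 0 in place of NaN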
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four'])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame({"A": range(4),
"B": range(1, 5),
"color": ['red', 'blue', 'blue', 'red']})
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y='A', color=df['color'])
result = [p.get_facecolor() for p in ax.patches]
expected = [(1., 0., 0., 1.),
(0., 0., 1., 1.),
(0., 0., 1., 1.),
(1., 0., 0., 1.)]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
position=0.2)
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(randn(5, 5))
for w in [1, 1.]:
ax = df.plot.bar(stacked=True, width=w)
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
assert ax.get_xlim() == (-0.75, 4.75)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.5
assert ax.patches[-1].get_x() == 3.5
self._check_bar_alignment(df, kind='bar', stacked=True, width=1)
self._check_bar_alignment(df, kind='barh', stacked=False, width=1)
self._check_bar_alignment(df, kind='barh', stacked=True, width=1)
self._check_bar_alignment(df, kind='bar', subplots=True, width=1)
self._check_bar_alignment(df, kind='barh', subplots=True, width=1)
@pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20],
'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
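# a NaN value is drawn as a zero-height bar; heights are read column by
# column (A, B, C)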
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
@pytest.mark.slow
def test_bar_categorical(self):
# GH 13019
df1 = pd.DataFrame(np.random.randn(6, 5),
index=pd.Index(list('ABCDEF')),
columns=pd.Index(list('abcde')))
# categorical index must behave the same
df2 = pd.DataFrame(np.random.randn(6, 5),
index=pd.CategoricalIndex(list('ABCDEF')),
columns=pd.CategoricalIndex(list('abcde')))
for df in [df1, df2]:
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x='x')
with pytest.raises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
# addressing issue #10611, to ensure the colorbar does not
# interfere with the x-axis label and tick labels when using
# the ipython inline backend.
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
ax1 = df.plot.scatter(x='A label', y='B label')
ax2 = df.plot.scatter(x='A label', y='B label', c='C label')
vis1 = [vis.get_visible() for vis in
ax1.xaxis.get_minorticklabels()]
vis2 = [vis.get_visible() for vis in
ax2.xaxis.get_minorticklabels()]
assert vis1 == vis2
vis1 = [vis.get_visible() for vis in
ax1.xaxis.get_majorticklabels()]
vis2 = [vis.get_visible() for vis in
ax2.xaxis.get_majorticklabels()]
assert vis1 == vis2
assert (ax1.xaxis.get_label().get_visible() ==
ax2.xaxis.get_label().get_visible())
@pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
# addressing issue #10678, to ensure the colorbar does not
# interfere with the x-axis label and tick labels when using
# the ipython inline backend.
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
ax = df.plot.hexbin('A label', 'B label', gridsize=12)
assert all(vis.get_visible() for vis in
ax.xaxis.get_minorticklabels())
assert all(vis.get_visible() for vis in
ax.xaxis.get_majorticklabels())
assert ax.xaxis.get_label().get_visible()
@pytest.mark.slow
def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
import matplotlib.pyplot as plt
random_array = np.random.random((1000, 3))
df = pd.DataFrame(random_array,
columns=['A label', 'B label', 'C label'])
fig, axes = plt.subplots(1, 2)
df.plot.scatter('A label', 'B label', c='C label', ax=axes[0])
df.plot.scatter('A label', 'B label', c='C label', ax=axes[1])
plt.tight_layout()
points = np.array([ax.get_position().get_points()
for ax in fig.axes])
axes_x_coords = points[:, :, 0]
parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
assert np.isclose(parent_distance,
colorbar_distance, atol=1e-7).all()
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self):
# GH 16199
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])})
with pytest.raises(ValueError) as ve:
df.plot(x='x', y='y', kind='scatter')
ve.match('requires y column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='x', kind='scatter')
ve.match('requires x column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='y', kind='scatter')
ve.match('requires x column to be numeric')
@pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
# n.b. there appears to be no public method
# to get the colorbar label
assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
assert ax.collections[0].cmap.name == cm
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
assert ax.collections[0].colorbar is None
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
assert ax.collections[0].colorbar is None
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied, normally we'd be on shaky ground
# comparing floats for equality but here we expect them to be
# identical.
tm.assert_numpy_array_equal(ax.collections[0]
.get_facecolor(), rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
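# passing both c= and color= is ambiguous and should raise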
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
default_colors = self._unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64))
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot.bar, subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
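# helper: plots `df` with the given options and checks bar/tick alignment:
# for align='center' each bar group should be centered on its tick, for
# align='edge' the group should start at the tick; axis limits should leave
# a 0.25 margin beyond the outermost bar edges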
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center', width=0.5,
position=0.5):
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position, grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min(p.get_x() for p in ax.patches)
max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min(p.get_y() for p in ax.patches)
max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
tm.assert_almost_equal(ax_min, min_edge - 0.25)
tm.assert_almost_equal(ax_max, max_edge + 0.25)
p = ax.patches[0]
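# a stacked or subplot bar patch spans the full bar width, while grouped
# (unstacked) bars split the group across len(df.columns) patches, so the
# group center is scaled by the number of columns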
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(
df.columns) * position
edge = p.get_y()
else:
raise ValueError
# Check that the ticks are located at integer positions
assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
# Check whether the bar is centered on the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@pytest.mark.slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
align='edge')
@pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([.1, 1., 10., 100])
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([.1, 1., 10., 100., 1000., 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# different warning on py3
if not PY3:
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box, subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
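# custom box positions should be passed through to matplotlib and show up
# as the x tick locations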
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box,
subplots=True, vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box() # default axes
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(result, None, expected_keys=[
'height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot, kind='kde',
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.hist,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
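# matplotlib >= 2.2 uses 'density' in place of the deprecated 'normed' kwarg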
if plotting._compat._mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
ax = series.plot.hist(cumulative=True, bins=4, **kwargs)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
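# helper: compares the x/y/width/height of the given patches against the
# expected arrays; only the expectations that are not None are checked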
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
# dtype depends on the values above, so no need to check it
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y,
check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h,
check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x,
check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w,
check_dtype=False)
@pytest.mark.slow
def test_hist_df_coord(self):
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True,
orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(
ax.patches[10:],
expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True,
orientation='horizontal')
self._check_box_coord(axes[0].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(
df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False, label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and that the
# index name and column names do not mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@pytest.mark.slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@pytest.mark.slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
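# style may be given as a dict keyed by column label or as a list in column
# order; only the first len(markers) lines are checked here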
for markers in [{0: '^',
1: '+',
2: 'o'}, {0: '^',
1: '+'}, ['^', '+', 'o'], ['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == 'None'
@pytest.mark.slow
@tm.capture_stdout
def test_line_colors(self):
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(color=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with pytest.raises(ValueError):
# Color contains shorthand hex value results in ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
_check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ['r', 'g', 'b']
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with pytest.raises(ValueError):
# Color contains shorthand hex value results in ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
# Forced show plot
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, color=custom_colors, subplots=True)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
self._check_colors(handles, facecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
linecolors = jet_with_alpha
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors_and_styles_subplots(self):
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# make color a list if plotting one column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind='kde', color='DodgerBlue',
subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
# TODO: outside this func?
if fliers_c is None:
fliers_c = 'k'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'],
linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'],
linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
default_colors = self._unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0],
default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0),
(0, 1, 0), '#123456')
with pytest.raises(ValueError):
# Color contains invalid key results in ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
import cycler
colors = list('rgbk')
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
@pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind='aasdf')
@pytest.mark.parametrize("x,y,lbl", [
(['B', 'C'], 'A', 'a'),
(['A'], ['B', 'C'], ['b', 'c']),
('A', ['B', 'C'], 'badlabel')
])
def test_invalid_xy_args(self, x, y, lbl):
# GH 18671, 19699: y may be list-like, but x may not
df = DataFrame({"A": [1, 2], 'B': [3, 4], 'C': [5, 6]})
with pytest.raises(ValueError):
df.plot(x=x, y=y, label=lbl)
@pytest.mark.parametrize("x,y", [
('A', 'B'),
(['A'], 'B')
])
def test_invalid_xy_args_dup_cols(self, x, y):
# GH 18671, 19699: y may be list-like, but x may not
df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list('AAB'))
with pytest.raises(ValueError):
df.plot(x=x, y=y)
@pytest.mark.parametrize("x,y,lbl,colors", [
('A', ['B'], ['b'], ['red']),
('A', ['B', 'C'], ['b', 'c'], ['red', 'blue']),
(0, [1, 2], ['bokeh', 'cython'], ['green', 'yellow'])
])
def test_y_listlike(self, x, y, lbl, colors):
# GH 19699: tests list-like y and verifies lbls & colors
df = DataFrame({"A": [1, 2], 'B': [3, 4], 'C': [5, 6]})
_check_plot_works(df.plot, x='A', y=y, label=lbl)
ax = df.plot(x=x, y=y, label=lbl, color=colors)
assert len(ax.lines) == len(y)
self._check_colors(ax.get_lines(), linecolors=colors)
@pytest.mark.parametrize("x,y,colnames", [
(0, 1, ['A', 'B']),
(1, 0, [0, 1])
])
def test_xy_args_integer(self, x, y, colnames):
# GH 20056: tests integer args for xy and checks col names
df = DataFrame({"A": [1, 2], 'B': [3, 4]})
df.columns = colnames
_check_plot_works(df.plot, x=x, y=y)
@pytest.mark.slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
assert len(ax.collections) == 1
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
# hexbin should have 2 axes in the figure: one for plotting and
# the other for the colorbar
assert len(axes[0].figure.axes) == 2
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
assert len(ax.collections) == 1
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
assert ax.collections[0].cmap.name == 'BuGn'
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
assert ax.collections[0].cmap.name == 'YlGn'
with pytest.raises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn', colormap='BuGn')
@pytest.mark.slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
assert ax.get_ylabel() == ylabel
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True, labels=labels,
colors=color_args)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
assert result == expected
# legend labels
# NaNs are not included in the legend when subplots=True
# see https://github.com/pandas-dev/pandas/issues/8390
assert ([x.get_text() for x in ax.get_legend().get_texts()] ==
base_expected[:i] + base_expected[i + 1:])
@pytest.mark.slow
def test_errorbar_plot(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'],
xerr=df_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df + 1).plot, yerr=df_err,
xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot,
yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12) * 0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with pytest.raises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'z': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_timeseries(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
# each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
df = DataFrame(np.arange(15).reshape(3, 5)).T
ax = df.plot(yerr=err, xerr=err / 2)
yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
expected_0_0 = err[0, :, 0] * np.array([-1, 1])
tm.assert_almost_equal(yerr_0_0, expected_0_0)
with pytest.raises(ValueError):
df.plot(yerr=err.T)
tm.close()
@td.xfail_if_mpl_2_2
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err,
yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
lines = []
errs = [c.lines
for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines)))
# GH 8081
df = DataFrame(
np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@pytest.mark.slow
def test_sharex_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737: using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_sharey_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737: using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._core._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@pytest.mark.slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
            # axes are visible because these are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}),
plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'})
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with pytest.raises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because of the cmap keyword
        # a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
try: # mpl 1.5
with mpl.rc_context(
rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
except (AttributeError, KeyError): # mpl 1.4
with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
@pytest.mark.parametrize('method', ['line', 'barh', 'bar'])
def test_secondary_axis_font_size(self, method):
# GH: 12565
df = (pd.DataFrame(np.random.randn(15, 2),
columns=list('AB'))
.assign(C=lambda df: df.B.cumsum())
.assign(D=lambda df: df.C * 1.1))
fontsize = 20
sy = ['C', 'D']
kwargs = dict(secondary_y=sy, fontsize=fontsize,
mark_right=True)
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax,
ylabelsize=fontsize)
def _generate_4_axes_via_gridspec():
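    # Build a 2x2 GridSpec figure and return the GridSpec together with its
    # four axes in [top-left, lower-left, top-right, lower-right] order.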
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
| bsd-3-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/util/print_versions.py | 9 | 4717 | import os
import platform
import sys
import struct
import subprocess
import codecs
def get_sys_info():
"Returns system information as a dict"
blob = []
# get full commit hash
commit = None
if os.path.isdir(".git") and os.path.isdir("pandas"):
try:
pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
so, serr = pipe.communicate()
except:
pass
else:
if pipe.returncode == 0:
commit = so
try:
commit = so.decode('utf-8')
except ValueError:
pass
commit = commit.strip().strip('"')
blob.append(('commit', commit))
try:
sysname, nodename, release, version, machine, processor = platform.uname(
)
blob.extend([
("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
("python-bits", struct.calcsize("P") * 8),
("OS", "%s" % (sysname)),
("OS-release", "%s" % (release)),
# ("Version", "%s" % (version)),
("machine", "%s" % (machine)),
("processor", "%s" % (processor)),
("byteorder", "%s" % sys.byteorder),
("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
("LANG", "%s" % os.environ.get('LANG', "None")),
])
except:
pass
return blob
def show_versions(as_json=False):
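    # Prints the versions of pandas and its key optional dependencies.
    # Passing as_json=True prints the information as a JSON-style dict;
    # passing a file path instead writes the same information to that file.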
import imp
sys_info = get_sys_info()
deps = [
# (MODULE_NAME, f(mod) -> mod version)
("pandas", lambda mod: mod.__version__),
("nose", lambda mod: mod.__version__),
("pip", lambda mod: mod.__version__),
("setuptools", lambda mod: mod.__version__),
("Cython", lambda mod: mod.__version__),
("numpy", lambda mod: mod.version.version),
("scipy", lambda mod: mod.version.version),
("statsmodels", lambda mod: mod.__version__),
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("blosc", lambda mod: mod.__version__),
("bottleneck", lambda mod: mod.__version__),
("tables", lambda mod: mod.__version__),
("numexpr", lambda mod: mod.__version__),
("matplotlib", lambda mod: mod.__version__),
("openpyxl", lambda mod: mod.__version__),
("xlrd", lambda mod: mod.__VERSION__),
("xlwt", lambda mod: mod.__VERSION__),
("xlsxwriter", lambda mod: mod.__version__),
("lxml", lambda mod: mod.etree.__version__),
("bs4", lambda mod: mod.__version__),
("html5lib", lambda mod: mod.__version__),
("httplib2", lambda mod: mod.__version__),
("apiclient", lambda mod: mod.__version__),
("sqlalchemy", lambda mod: mod.__version__),
("pymysql", lambda mod: mod.__version__),
("psycopg2", lambda mod: mod.__version__),
("Jinja2", lambda mod: mod.__version__)
]
deps_blob = list()
for (modname, ver_f) in deps:
try:
try:
mod = imp.load_module(modname, *imp.find_module(modname))
except (ImportError):
import importlib
mod = importlib.import_module(modname)
ver = ver_f(mod)
deps_blob.append((modname, ver))
except:
deps_blob.append((modname, None))
if (as_json):
# 2.6-safe
try:
import json
except:
import simplejson as json
j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
if as_json == True:
print(j)
else:
with codecs.open(as_json, "wb", encoding='utf8') as f:
json.dump(j, f, indent=2)
else:
print("\nINSTALLED VERSIONS")
print("------------------")
for k, stat in sys_info:
print("%s: %s" % (k, stat))
print("")
for k, stat in deps_blob:
print("%s: %s" % (k, stat))
def main():
# optparse is 2.6-safe
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-j", "--json", metavar="FILE", nargs=1,
help="Save output as JSON into file, pass in '-' to output to stdout")
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
| artistic-2.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/alignment_test.py | 12 | 2171 | #!/usr/bin/env python
"""
You can precisely lay out text in data or axes (0,1) coordinates. This
example shows you some of the alignment and rotation specifications to
lay out text.
"""
from pylab import *
from matplotlib.lines import Line2D
from matplotlib.patches import Rectangle
# build a rectangle in axes coords
left, width = .25, .5
bottom, height = .25, .5
right = left + width
top = bottom + height
ax = gca()
p = Rectangle((left, bottom), width, height,
fill=False,
)
p.set_transform(ax.transAxes)
p.set_clip_on(False)
ax.add_patch(p)
ax.text(left, bottom, 'left top',
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)
ax.text(left, bottom, 'left bottom',
horizontalalignment='left',
verticalalignment='bottom',
transform=ax.transAxes)
ax.text(right, top, 'right bottom',
horizontalalignment='right',
verticalalignment='bottom',
transform=ax.transAxes)
ax.text(right, top, 'right top',
horizontalalignment='right',
verticalalignment='top',
transform=ax.transAxes)
ax.text(right, bottom, 'center top',
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
ax.text(left, 0.5*(bottom+top), 'right center',
horizontalalignment='right',
verticalalignment='center',
rotation='vertical',
transform=ax.transAxes)
ax.text(left, 0.5*(bottom+top), 'left center',
horizontalalignment='left',
verticalalignment='center',
rotation='vertical',
transform=ax.transAxes)
ax.text(0.5*(left+right), 0.5*(bottom+top), 'middle',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
ax.text(right, 0.5*(bottom+top), 'centered',
horizontalalignment='center',
verticalalignment='center',
rotation='vertical',
transform=ax.transAxes)
ax.text(left, top, 'rotated\nwith newlines',
horizontalalignment='center',
verticalalignment='center',
rotation=45,
transform=ax.transAxes)
axis('off')
show()
| mit |
vinodkc/spark | python/pyspark/pandas/tests/test_reshape.py | 15 | 11448 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from decimal import Decimal
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas.utils import name_like_string
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class ReshapeTest(PandasOnSparkTestCase):
def test_get_dummies(self):
for pdf_or_ps in [
pd.Series([1, 1, 1, 2, 2, 1, 3, 4]),
# pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
# pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4],
# categories=[4, 3, 2, 1])),
pd.DataFrame(
{
"a": [1, 2, 3, 4, 4, 3, 2, 1],
# 'b': pd.Categorical(list('abcdabcd')),
"b": list("abcdabcd"),
}
),
pd.DataFrame({10: [1, 2, 3, 4, 4, 3, 2, 1], 20: list("abcdabcd")}),
]:
psdf_or_psser = ps.from_pandas(pdf_or_ps)
self.assert_eq(ps.get_dummies(psdf_or_psser), pd.get_dummies(pdf_or_ps, dtype=np.int8))
psser = ps.Series([1, 1, 1, 2, 2, 1, 3, 4])
with self.assertRaisesRegex(
NotImplementedError, "get_dummies currently does not support sparse"
):
ps.get_dummies(psser, sparse=True)
def test_get_dummies_object(self):
pdf = pd.DataFrame(
{
"a": [1, 2, 3, 4, 4, 3, 2, 1],
# 'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
"b": list("abcdabcd"),
# 'c': pd.Categorical(list('abcdabcd')),
"c": list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
# Explicitly exclude object columns
self.assert_eq(
ps.get_dummies(psdf, columns=["a", "c"]),
pd.get_dummies(pdf, columns=["a", "c"], dtype=np.int8),
)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.b), pd.get_dummies(pdf.b, dtype=np.int8))
self.assert_eq(
ps.get_dummies(psdf, columns=["b"]), pd.get_dummies(pdf, columns=["b"], dtype=np.int8)
)
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=("a", "c")))
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns="b"))
# non-string names
pdf = pd.DataFrame(
{10: [1, 2, 3, 4, 4, 3, 2, 1], 20: list("abcdabcd"), 30: list("abcdabcd")}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf, columns=[10, 30]),
pd.get_dummies(pdf, columns=[10, 30], dtype=np.int8),
)
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns=10))
def test_get_dummies_date_datetime(self):
pdf = pd.DataFrame(
{
"d": [
datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
datetime.date(2019, 1, 1),
],
"dt": [
datetime.datetime(2019, 1, 1, 0, 0, 0),
datetime.datetime(2019, 1, 1, 0, 0, 1),
datetime.datetime(2019, 1, 1, 0, 0, 0),
],
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.d), pd.get_dummies(pdf.d, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.dt), pd.get_dummies(pdf.dt, dtype=np.int8))
def test_get_dummies_boolean(self):
pdf = pd.DataFrame({"b": [True, False, True]})
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.b), pd.get_dummies(pdf.b, dtype=np.int8))
def test_get_dummies_decimal(self):
pdf = pd.DataFrame({"d": [Decimal(1.0), Decimal(2.0), Decimal(1)]})
psdf = ps.from_pandas(pdf)
self.assert_eq(ps.get_dummies(psdf), pd.get_dummies(pdf, dtype=np.int8))
self.assert_eq(ps.get_dummies(psdf.d), pd.get_dummies(pdf.d, dtype=np.int8), almost=True)
def test_get_dummies_kwargs(self):
# pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category')
pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4])
psser = ps.from_pandas(pser)
self.assert_eq(
ps.get_dummies(psser, prefix="X", prefix_sep="-"),
pd.get_dummies(pser, prefix="X", prefix_sep="-", dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psser, drop_first=True),
pd.get_dummies(pser, drop_first=True, dtype=np.int8),
)
# nan
# pser = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5], dtype='category')
pser = pd.Series([1, 1, 1, 2, np.nan, 3, np.nan, 5])
psser = ps.from_pandas(pser)
self.assert_eq(ps.get_dummies(psser), pd.get_dummies(pser, dtype=np.int8), almost=True)
# dummy_na
self.assert_eq(
ps.get_dummies(psser, dummy_na=True), pd.get_dummies(pser, dummy_na=True, dtype=np.int8)
)
def test_get_dummies_prefix(self):
pdf = pd.DataFrame({"A": ["a", "b", "a"], "B": ["b", "a", "c"], "D": [0, 0, 1]})
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf, prefix=["foo", "bar"]),
pd.get_dummies(pdf, prefix=["foo", "bar"], dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix=["foo"], columns=["B"]),
pd.get_dummies(pdf, prefix=["foo"], columns=["B"], dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"A": "foo", "B": "bar"}),
pd.get_dummies(pdf, prefix={"A": "foo", "B": "bar"}, dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"B": "foo", "A": "bar"}),
pd.get_dummies(pdf, prefix={"B": "foo", "A": "bar"}, dtype=np.int8),
)
self.assert_eq(
ps.get_dummies(psdf, prefix={"A": "foo", "B": "bar"}, columns=["A", "B"]),
pd.get_dummies(pdf, prefix={"A": "foo", "B": "bar"}, columns=["A", "B"], dtype=np.int8),
)
with self.assertRaisesRegex(NotImplementedError, "string types"):
ps.get_dummies(psdf, prefix="foo")
with self.assertRaisesRegex(ValueError, "Length of 'prefix' \\(1\\) .* \\(2\\)"):
ps.get_dummies(psdf, prefix=["foo"])
with self.assertRaisesRegex(ValueError, "Length of 'prefix' \\(2\\) .* \\(1\\)"):
ps.get_dummies(psdf, prefix=["foo", "bar"], columns=["B"])
pser = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], name="A")
psser = ps.from_pandas(pser)
self.assert_eq(
ps.get_dummies(psser, prefix="foo"), pd.get_dummies(pser, prefix="foo", dtype=np.int8)
)
# columns are ignored.
self.assert_eq(
ps.get_dummies(psser, prefix=["foo"], columns=["B"]),
pd.get_dummies(pser, prefix=["foo"], columns=["B"], dtype=np.int8),
)
def test_get_dummies_dtype(self):
pdf = pd.DataFrame(
{
# "A": pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c']),
"A": ["a", "b", "a"],
"B": [0, 0, 1],
}
)
psdf = ps.from_pandas(pdf)
if LooseVersion("0.23.0") <= LooseVersion(pd.__version__):
exp = pd.get_dummies(pdf, dtype="float64")
else:
exp = pd.get_dummies(pdf)
exp = exp.astype({"A_a": "float64", "A_b": "float64"})
res = ps.get_dummies(psdf, dtype="float64")
self.assert_eq(res, exp)
def test_get_dummies_multiindex_columns(self):
pdf = pd.DataFrame(
{
("x", "a", "1"): [1, 2, 3, 4, 4, 3, 2, 1],
("x", "b", "2"): list("abcdabcd"),
("y", "c", "3"): list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf),
pd.get_dummies(pdf, dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=[("y", "c", "3"), ("x", "a", "1")]),
pd.get_dummies(pdf, columns=[("y", "c", "3"), ("x", "a", "1")], dtype=np.int8).rename(
columns=name_like_string
),
)
self.assert_eq(
ps.get_dummies(psdf, columns=["x"]),
pd.get_dummies(pdf, columns=["x"], dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=("x", "a")),
pd.get_dummies(pdf, columns=("x", "a"), dtype=np.int8).rename(columns=name_like_string),
)
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=["z"]))
self.assertRaises(KeyError, lambda: ps.get_dummies(psdf, columns=("x", "c")))
self.assertRaises(ValueError, lambda: ps.get_dummies(psdf, columns=[("x",), "c"]))
self.assertRaises(TypeError, lambda: ps.get_dummies(psdf, columns="x"))
# non-string names
pdf = pd.DataFrame(
{
("x", 1, "a"): [1, 2, 3, 4, 4, 3, 2, 1],
("x", 2, "b"): list("abcdabcd"),
("y", 3, "c"): list("abcdabcd"),
}
)
psdf = ps.from_pandas(pdf)
self.assert_eq(
ps.get_dummies(psdf),
pd.get_dummies(pdf, dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=[("y", 3, "c"), ("x", 1, "a")]),
pd.get_dummies(pdf, columns=[("y", 3, "c"), ("x", 1, "a")], dtype=np.int8).rename(
columns=name_like_string
),
)
self.assert_eq(
ps.get_dummies(psdf, columns=["x"]),
pd.get_dummies(pdf, columns=["x"], dtype=np.int8).rename(columns=name_like_string),
)
self.assert_eq(
ps.get_dummies(psdf, columns=("x", 1)),
pd.get_dummies(pdf, columns=("x", 1), dtype=np.int8).rename(columns=name_like_string),
)
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_reshape import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
yanlend/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
        + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
freilaufdiode/blameyourisp | blameyourisp_graph/blameyourisp_graph.py | 1 | 4308 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import warnings
import matplotlib
matplotlib.use('Agg')
try:
import numpy as np
except:
warnings.warn("numpy is not installed. some statistical values will not be computed")
np = False
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import dateutil.parser
import os
try:
from tzlocal import get_localzone
tz = get_localzone()
except:
try:
import pytz
warnings.warn("Cannot get local timezone, falling back to UTC")
tz = pytz.utc
except:
warnings.warn("Neither tzlocal nor pytz installed, ignoring timezone")
tz = None
FILE = os.path.join(os.path.expanduser("~"), ".blameyourisp", "blameyourisp.log")
LEGEND_FONTSIZE = 8
TITLE_FONTSIZE = 20
def stat( arr, label="" ):
st = label
_len = len(arr)
if st != "":
st += ": "
st += "min: %.2f"%min(arr)
st += ", max: %.2f"%max(arr)
if _len > 0:
st += ", avg: %.2f"%(sum(arr)/_len)
if np != False:
st += ", std: %.2f"%np.std(arr)
st += ", var: %.2f"%np.var(arr)
st += ", n: %d"%_len
return st
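# Illustrative example (assumes numpy is installed, so std/var are included):
#   stat([1.0, 2.0, 3.0], "ping")
#   -> "ping: min: 1.00, max: 3.00, avg: 2.00, std: 0.82, var: 0.67, n: 3"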
def main():
dates = []
pings = []
downs = []
ups = []
units = []
isp = None
isp_fail = False
if not os.path.exists(FILE):
raise Exception("File not found: %s"%FILE)
with open(FILE, 'r') as f:
while True:
date = f.readline()
if date in ("", "\n"): break
_isp = f.readline().split(":")
assert _isp[0] == "ISP"
__isp = _isp[1].strip()
if isp is None:
if __isp not in ("", "\n"):
isp = __isp
if isp is not None and __isp not in ("", "\n") and isp.split("(")[0].strip() != __isp.split("(")[0].strip():
isp_fail = True
warnings.warn("inconsistent ISP's detected '%s', '%s'"%(isp,__isp))
ping = f.readline()
_pings = ping.split(" ")
dates.append(dateutil.parser.parse(date))
if _pings[0] != 'Ping:':
# some error
downs.append(0)
ups.append(0)
pings.append(0)
else:
# test ok
down = f.readline()
_downs = down.split(" ")
up = f.readline()
_ups = up.split(" ")
assert _downs[0] == 'Download:'
assert _ups[0] == 'Upload:'
if len(units) == 0:
units.append(_downs[2].strip())
units.append(_pings[2].strip())
assert _downs[2].strip() == units[0]
assert _ups[2].strip() == units[0]
assert _pings[2].strip() == units[1]
pings.append(float(_pings[1]))
downs.append(float(_downs[1]))
ups.append(float(_ups[1]))
if len(dates) == 0:
raise Exception("no data available")
fig = plt.figure(1,figsize=(10,10),dpi=80)
if isp_fail:
isp = "Warning: inconsistent ISP's detected!"
if isp is None:
isp = "ISP not detected"
fig.suptitle(isp, fontsize=TITLE_FONTSIZE, fontweight='bold')
# Download + Upload
g = fig.add_subplot(211)
plt.ylim(0, 1.1*max(max(downs),max(ups)))
g.plot(dates,downs,"b",label="down")
g.plot(dates,ups,"r",label="up")
g.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d, %H:%M', tz=tz))
g.xaxis.grid()
plt.ylabel("Speed [%s]"%units[0])
plt.title("%s\n%s"%(stat(downs, "\n\ndown"), stat(ups, "up")))
for tick in g.get_xticklabels():
tick.set_rotation(15)
g.legend(fontsize=LEGEND_FONTSIZE,loc=2,ncol=2)
# Ping
g = fig.add_subplot(212)
plt.title(stat(pings, "ping"))
plt.ylim(0, 1.1*max(pings))
g.plot(dates,pings,"g",label="ping")
g.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d, %H:%M', tz=tz))
g.xaxis.grid()
plt.ylabel("Latency [%s]"%units[1])
for tick in g.get_xticklabels():
tick.set_rotation(15)
g.legend(fontsize=LEGEND_FONTSIZE,loc=2)
plt.tight_layout()
plt.savefig( os.path.join(os.path.expanduser("~"), 'blameyourisp.png'))
plt.close()
if __name__ == "__main__":
main()
| apache-2.0 |
gromitsun/sim-xrf-py | others/snr_90_180/snr_180_as.py | 1 | 1768 | import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# # #fonts# # #
import matplotlib
from matplotlib import rc
matplotlib.rcParams['pdf.fonttype'] = 'truetype'
fontProperties = {'family':'serif','serif':['Arial'],
'weight' : 'normal', 'size' : '12'}
rc('font',**fontProperties)
# # #
n = 1e12
plt.figure('snr_sa_180_as')
ax1 = plt.gca()
ax2 = ax1.twinx()
openings = np.load('snr_as180_500nm.npz')['openings']
snr = np.load('snr_as180_500nm.npz')['snr']*np.sqrt(n)
p2b = np.load('snr_as180_500nm.npz')['p2b']
ax1.plot(openings,snr, 'r-', label='500 nm')
ax2.plot(openings,p2b, 'b-', label='500 nm')
snr = np.load('snr_as180_5um.npz')['snr']*np.sqrt(n)
p2b = np.load('snr_as180_5um.npz')['p2b']
ax1.plot(openings,snr, 'r--', label=r'5 um')
ax2.plot(openings,p2b*10, 'b--', label=r'5 um ($\times$ 10)')
snr = np.load('snr_as180_50um.npz')['snr']*np.sqrt(n)
p2b = np.load('snr_as180_50um.npz')['p2b']
ax1.plot(openings,snr, 'r-.', label=r'50 um')
ax2.plot(openings,p2b*100, 'b-.', label=r'50 um ($\times$ 100)')
snr = np.load('snr_as180_500um.npz')['snr']*np.sqrt(n)
p2b = np.load('snr_as180_500um.npz')['p2b']
ax1.plot(openings,snr, 'r.', label=r'500 um')
ax1.set_ylabel(r'S/N (red)', color = 'r')
ax2.plot(openings,p2b*100, 'b.', label=r'500 um ($\times$ 100)')
plt.ylabel(r'P/B (blue)', color = 'b')
# plt.yscale('log')
# plt.ylim(0,18)
ax1.legend(loc=2,ncol=2,bbox_to_anchor=(0, .85))
ax2.legend(loc=2,ncol=2)
ax1.set_xlabel(r'Collection semi-angle $\phi$ (deg)')
# ax1.ticklabel_format(style='sci', axis='y', scilimits=(-2,2))
# ax2.ticklabel_format(style='sci', axis='y', scilimits=(-2,2))
for tl in ax1.get_yticklabels():
tl.set_color('r')
for tl in ax2.get_yticklabels():
tl.set_color('b')
plt.show()
| mit |
ssaeger/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
caogang/wgan-gp | gan_toy.py | 1 | 9279 | import os, sys
sys.path.append(os.getcwd())
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sklearn.datasets
import tflib as lib
import tflib.plot
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
MODE = 'wgan-gp' # wgan or wgan-gp
DATASET = '8gaussians' # 8gaussians, 25gaussians, swissroll
DIM = 512 # Model dimensionality
FIXED_GENERATOR = False # whether to hold the generator fixed at real data plus
# Gaussian noise, as in the plots in the paper
LAMBDA = .1 # Smaller lambda seems to help for toy tasks specifically
CRITIC_ITERS = 5 # How many critic iterations per generator iteration
BATCH_SIZE = 256 # Batch size
ITERS = 100000 # how many generator iterations to train for
use_cuda = True
# ==================Definition Start======================
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
main = nn.Sequential(
nn.Linear(2, DIM),
nn.ReLU(True),
nn.Linear(DIM, DIM),
nn.ReLU(True),
nn.Linear(DIM, DIM),
nn.ReLU(True),
nn.Linear(DIM, 2),
)
self.main = main
def forward(self, noise, real_data):
if FIXED_GENERATOR:
return noise + real_data
else:
output = self.main(noise)
return output
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
main = nn.Sequential(
nn.Linear(2, DIM),
nn.ReLU(True),
nn.Linear(DIM, DIM),
nn.ReLU(True),
nn.Linear(DIM, DIM),
nn.ReLU(True),
nn.Linear(DIM, 1),
)
self.main = main
def forward(self, inputs):
output = self.main(inputs)
return output.view(-1)
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
frame_index = [0]
def generate_image(true_dist):
"""
Generates and saves a plot of the true distribution, the generator, and the
critic.
"""
N_POINTS = 128
RANGE = 3
points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')
points[:, :, 0] = np.linspace(-RANGE, RANGE, N_POINTS)[:, None]
points[:, :, 1] = np.linspace(-RANGE, RANGE, N_POINTS)[None, :]
points = points.reshape((-1, 2))
points_v = autograd.Variable(torch.Tensor(points), volatile=True)
if use_cuda:
points_v = points_v.cuda()
disc_map = netD(points_v).cpu().data.numpy()
noise = torch.randn(BATCH_SIZE, 2)
if use_cuda:
noise = noise.cuda()
noisev = autograd.Variable(noise, volatile=True)
true_dist_v = autograd.Variable(torch.Tensor(true_dist).cuda() if use_cuda else torch.Tensor(true_dist))
samples = netG(noisev, true_dist_v).cpu().data.numpy()
plt.clf()
x = y = np.linspace(-RANGE, RANGE, N_POINTS)
plt.contour(x, y, disc_map.reshape((len(x), len(y))).transpose())
plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')
if not FIXED_GENERATOR:
plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')
plt.savefig('tmp/' + DATASET + '/' + 'frame' + str(frame_index[0]) + '.jpg')
frame_index[0] += 1
# Dataset iterator
def inf_train_gen():
if DATASET == '25gaussians':
dataset = []
for i in xrange(100000 / 25):
for x in xrange(-2, 3):
for y in xrange(-2, 3):
point = np.random.randn(2) * 0.05
point[0] += 2 * x
point[1] += 2 * y
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
np.random.shuffle(dataset)
dataset /= 2.828 # stdev
while True:
for i in xrange(len(dataset) / BATCH_SIZE):
yield dataset[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
elif DATASET == 'swissroll':
while True:
data = sklearn.datasets.make_swiss_roll(
n_samples=BATCH_SIZE,
noise=0.25
)[0]
data = data.astype('float32')[:, [0, 2]]
data /= 7.5 # stdev plus a little
yield data
elif DATASET == '8gaussians':
scale = 2.
centers = [
(1, 0),
(-1, 0),
(0, 1),
(0, -1),
(1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)),
(-1. / np.sqrt(2), 1. / np.sqrt(2)),
(-1. / np.sqrt(2), -1. / np.sqrt(2))
]
centers = [(scale * x, scale * y) for x, y in centers]
while True:
dataset = []
for i in xrange(BATCH_SIZE):
point = np.random.randn(2) * .02
center = random.choice(centers)
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
dataset /= 1.414 # stdev
yield dataset
def calc_gradient_penalty(netD, real_data, fake_data):
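    # WGAN-GP gradient penalty: evaluate the critic on random interpolations
    # between real and fake samples and penalize deviations of the gradient
    # norm from 1 (two-sided penalty), scaled by LAMBDA.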
alpha = torch.rand(BATCH_SIZE, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.cuda() if use_cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if use_cuda:
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda() if use_cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
# ==================Definition End======================
netG = Generator()
netD = Discriminator()
netD.apply(weights_init)
netG.apply(weights_init)
print netG
print netD
if use_cuda:
netD = netD.cuda()
netG = netG.cuda()
optimizerD = optim.Adam(netD.parameters(), lr=1e-4, betas=(0.5, 0.9))
optimizerG = optim.Adam(netG.parameters(), lr=1e-4, betas=(0.5, 0.9))
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
one = one.cuda()
mone = mone.cuda()
data = inf_train_gen()
for iteration in xrange(ITERS):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for iter_d in xrange(CRITIC_ITERS):
_data = data.next()
real_data = torch.Tensor(_data)
if use_cuda:
real_data = real_data.cuda()
real_data_v = autograd.Variable(real_data)
netD.zero_grad()
# train with real
D_real = netD(real_data_v)
D_real = D_real.mean()
D_real.backward(mone)
# train with fake
noise = torch.randn(BATCH_SIZE, 2)
if use_cuda:
noise = noise.cuda()
noisev = autograd.Variable(noise, volatile=True) # totally freeze netG
fake = autograd.Variable(netG(noisev, real_data_v).data)
inputv = fake
D_fake = netD(inputv)
D_fake = D_fake.mean()
D_fake.backward(one)
# train with gradient penalty
gradient_penalty = calc_gradient_penalty(netD, real_data_v.data, fake.data)
gradient_penalty.backward()
D_cost = D_fake - D_real + gradient_penalty
Wasserstein_D = D_real - D_fake
optimizerD.step()
if not FIXED_GENERATOR:
############################
# (2) Update G network
###########################
for p in netD.parameters():
p.requires_grad = False # to avoid computation
netG.zero_grad()
_data = data.next()
real_data = torch.Tensor(_data)
if use_cuda:
real_data = real_data.cuda()
real_data_v = autograd.Variable(real_data)
noise = torch.randn(BATCH_SIZE, 2)
if use_cuda:
noise = noise.cuda()
noisev = autograd.Variable(noise)
fake = netG(noisev, real_data_v)
G = netD(fake)
G = G.mean()
G.backward(mone)
G_cost = -G
optimizerG.step()
# Write logs and save samples
lib.plot.plot('tmp/' + DATASET + '/' + 'disc cost', D_cost.cpu().data.numpy())
lib.plot.plot('tmp/' + DATASET + '/' + 'wasserstein distance', Wasserstein_D.cpu().data.numpy())
if not FIXED_GENERATOR:
lib.plot.plot('tmp/' + DATASET + '/' + 'gen cost', G_cost.cpu().data.numpy())
if iteration % 100 == 99:
lib.plot.flush()
generate_image(_data)
lib.plot.tick()
| mit |
0todd0000/spm1d | spm1d/rft1d/data.py | 1 | 2017 |
'''
Example datasets
Current datasets include:
* Weather (Ramsay & Silverman, 2005)
'''
# Copyright (C) 2016 Todd Pataky
import os
import numpy as np
from scipy.io import loadmat
def weather():
'''
This dataset was made available by Prof. James O. Ramsay
of McGill University. The dataset was download from:
http://www.psych.mcgill.ca/misc/fda/downloads/FDAfuns/Matlab
on 16 August 2014 (see the `./examples/weather` directory).
No license was found with that dataset. Only "daily.m" and
"daily.mat" from that dataset are redistributed here, on
the condition that the original source be acknowledged.
The dataset is described here:
http://www.psych.mcgill.ca/misc/fda/ex-weather-a1.html
and also in:
Ramsay JO, Silverman BW (2005). Functional Data Analysis
(Second Edition), Springer, New York.
Chapter 13: "Modelling functional responses with
multivariate covariates"
Data subsets include:
- 'Atlantic'
- 'Pacific'
- 'Continental'
- 'Arctic'
:Example use:
>>> weather = rft1d.data.weather()
>>> y = weather['Atlantic'] # (15 x 365) numpy array
>>> from matplotlib import pyplot
>>> pyplot.plot(y.T)
'''
fname = os.path.join(os.path.dirname(__file__), 'data', 'weather', 'daily.mat')
M = loadmat(fname)
Y = M['tempav'].T
geogind = M['geogindex'].flatten()
### place indices (from daily.m, Lines 412-415)
atlindex = [1,2,4,8,9,13,14,15,19,22,23,24,25,28,34] #Atlantic
pacindex = [12,17,18,30,31] #Pacific
conindex = [3,5,6,7,16,20,26,27,29,32,33,35] #Continental
artindex = [10,11,21] #Arctic
### boolean indices for places:
i0 = np.array([i in atlindex for i in geogind])
i1 = np.array([i in pacindex for i in geogind])
i2 = np.array([i in conindex for i in geogind])
i3 = np.array([i in artindex for i in geogind])
### extract places:
y0 = Y[i0]
y1 = Y[i1]
y2 = Y[i2]
y3 = Y[i3]
### build dictionary:
D = dict(Atlantic=y0, Pacific=y1, Continental=y2, Arctic=y3)
return D
| gpl-3.0 |
untom/scikit-learn | sklearn/ensemble/weight_boosting.py | 30 | 40648 | """Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Arnaud Joly <[email protected]>
#
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype)
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
        z : generator of float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _check_sample_weight(self):
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
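# Illustrative aside: for a sample with K = 3 classes and predicted
# probabilities p = (0.2, 0.3, 0.5), the transform above gives each class a
# vote of (K - 1) * (log p_k - mean_j(log p_j)); the three votes sum to zero,
# and only classes whose log-probability is above the per-sample average
# receive a positive vote.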
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
        The class labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
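    # Illustrative aside on the SAMME weight above: with n_classes = 3 and a
    # weighted error of 0.4, estimator_weight is
    # learning_rate * (log(0.6 / 0.4) + log(2)) ~= 1.10 * learning_rate, and
    # only misclassified samples have their weights scaled up by
    # exp(estimator_weight); an error of at least 1 - 1/K = 2/3 would have
    # discarded the estimator instead.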
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
otherwise ``k==n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
    estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
self._check_sample_weight()
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
        # make sure bootstrap_idx is an ndarray of indices
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
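    # Illustrative aside on the AdaBoost.R2 update above: with an average loss
    # of 0.2, beta = 0.2 / 0.8 = 0.25 and estimator_weight is
    # learning_rate * log(1 / beta) ~= 1.39 * learning_rate; a sample with
    # normalized error 0 has its weight multiplied by beta ** learning_rate,
    # while a sample with error 1 keeps its weight, so the hardest samples
    # gain relative importance for the next boost.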
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
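    # Illustrative aside: if three estimators with weights (0.2, 0.5, 0.3)
    # predict (1.0, 3.0, 10.0) for one sample, the cumulative weights over the
    # sorted predictions are (0.2, 0.7, 1.0); the first entry reaching half of
    # the total weight is the second one, so the weighted median returned
    # above is 3.0.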
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
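# A minimal, self-contained usage sketch; because of the relative imports
# above it only runs when this file is executed as a module
# (e.g. ``python -m sklearn.ensemble.weight_boosting``).
if __name__ == "__main__":
    from sklearn.datasets import make_classification, make_regression

    X_c, y_c = make_classification(n_samples=200, random_state=0)
    clf = AdaBoostClassifier(n_estimators=20, random_state=0).fit(X_c, y_c)
    print(clf.predict(X_c[:5]))                   # hard class labels
    print(list(clf.staged_score(X_c, y_c))[-1])   # accuracy after last boost

    X_r, y_r = make_regression(n_samples=200, random_state=0)
    reg = AdaBoostRegressor(n_estimators=20, random_state=0).fit(X_r, y_r)
    print(reg.predict(X_r[:5]))                   # weighted-median predictions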
| bsd-3-clause |
1iyiwei/pyml | code/ch03/share.py | 2 | 1904 | import numpy as np
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import warnings
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02, xlabel='', ylabel='', title=''):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, c=cmap(idx),
marker=markers[idx], label=cl)
# highlight test samples
if test_idx:
# plot all samples
if not versiontuple(np.__version__) >= versiontuple('1.9.0'):
X_test, y_test = X[list(test_idx), :], y[list(test_idx)]
warnings.warn('Please update to NumPy 1.9.0 or newer')
else:
X_test, y_test = X[test_idx, :], y[test_idx]
plt.scatter(X_test[:, 0],
X_test[:, 1],
c='',
alpha=1.0,
linewidths=1,
marker='o',
s=55, label='test set')
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
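# A minimal usage sketch; the Iris data and the decision tree are just one
# possible choice of data and classifier for this helper.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier

    iris = load_iris()
    X, y = iris.data[:, [2, 3]], iris.target  # petal length and petal width
    tree = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
    plot_decision_regions(X, y, classifier=tree,
                          xlabel='petal length [cm]',
                          ylabel='petal width [cm]',
                          title='Decision tree regions')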
| mit |
mhdella/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
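# Quick sanity check of the piecewise definition above: margins z = y * f(x)
# of -2, 0 and 2 fall in the linear, quadratic and zero regions and give
# losses 8, 1 and 0 respectively.
assert modified_huber_loss(
    np.array([-2., 0., 2.]), 1.).tolist() == [8., 1., 0.]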
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
timqian/sms-tools | lectures/5-Sinusoidal-model/plots-code/sineModel-anal-synth.py | 24 | 1483 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
x1 = x[0:50000]
w = np.blackman(2001)
N = 2048
H = 500
t = -90
minSineDur = .01
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
Ns = 512
H = Ns/4
tfreq, tmag, tphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
maxplotfreq = 3000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x1.size)/float(fs), x1, 'b', lw=1.5)
plt.axis([0,x1.size/float(fs),min(x1),max(x1)])
plt.title('x (bendir.wav)')
plt.subplot(3,1,2)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('f_t, sine frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b', lw=1.5)
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
UF.wavwrite(y, fs, 'bendir-sine-synthesis.wav')
plt.savefig('sineModel-anal-synth.png')
plt.show()
| agpl-3.0 |
yosssi/scipy_2015_sklearn_tutorial | fetch_data.py | 20 | 2545 | import os
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
import zipfile
SENTIMENT140_URL = ("http://cs.stanford.edu/people/alecmgo/"
"trainingandtestdata.zip")
SENTIMENT140_ARCHIVE_NAME = "trainingandtestdata.zip"
def get_datasets_folder():
here = os.path.dirname(__file__)
notebooks = os.path.join(here, 'notebooks')
datasets_folder = os.path.abspath(os.path.join(notebooks, 'datasets'))
datasets_archive = os.path.abspath(os.path.join(notebooks, 'datasets.zip'))
if not os.path.exists(datasets_folder):
if os.path.exists(datasets_archive):
print("Extracting " + datasets_archive)
zf = zipfile.ZipFile(datasets_archive)
zf.extractall('.')
assert os.path.exists(datasets_folder)
else:
print("Creating datasets folder: " + datasets_folder)
os.makedirs(datasets_folder)
else:
print("Using existing dataset folder:" + datasets_folder)
return datasets_folder
def check_sentiment140(datasets_folder):
print("Checking availability of the sentiment 140 dataset")
archive_path = os.path.join(datasets_folder, SENTIMENT140_ARCHIVE_NAME)
sentiment140_path = os.path.join(datasets_folder, 'sentiment140')
train_path = os.path.join(sentiment140_path,
'training.1600000.processed.noemoticon.csv')
test_path = os.path.join(sentiment140_path,
'testdata.manual.2009.06.14.csv')
if not os.path.exists(sentiment140_path):
if not os.path.exists(archive_path):
print("Downloading dataset from %s (77MB)" % SENTIMENT140_URL)
opener = urlopen(SENTIMENT140_URL)
open(archive_path, 'wb').write(opener.read())
else:
print("Found archive: " + archive_path)
print("Extracting %s to %s" % (archive_path, sentiment140_path))
zf = zipfile.ZipFile(archive_path)
zf.extractall(sentiment140_path)
print("Checking that the sentiment 140 CSV files exist...")
assert os.path.exists(train_path)
assert os.path.exists(test_path)
print("=> Success!")
if __name__ == "__main__":
datasets_folder = get_datasets_folder()
check_sentiment140(datasets_folder)
print("Loading Labeled Faces Data (~200MB)")
from sklearn.datasets import fetch_lfw_people
fetch_lfw_people(min_faces_per_person=70, resize=0.4,
data_home=datasets_folder)
print("=> Success!")
| cc0-1.0 |
williamdjones/protein_binding | convert_kinase_h5.py | 1 | 2747 | # this script converts the old h5 file for the kinase subset, which contains no encoded train/test split, to an h5
# with the explicitly encoded train/test split
# by Derek Jones on 12/19/2017
import argparse
import h5py
import pandas as pd
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-i',type=str,help="input file path to old h5")
parser.add_argument('-o',type=str,help="output file path to new h5")
parser.add_argument('-train', type=str, help="input path to file with training compounds")
parser.add_argument('-test', type=str, help="input path to file with testing compounds")
args = parser.parse_args()
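# Example invocation (the file names are placeholders):
#   python convert_kinase_h5.py -i old_kinase.h5 -o new_kinase.h5 \
#       -train train_compounds.csv -test test_compounds.csv
# The train/test CSVs are expected to have the row index in the first column,
# the receptor name in column '0' and the drugID in column '1'.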
old_fo = h5py.File(args.i, "r")
new_fo = h5py.File(args.o, "w")
receptors = list(old_fo.keys()) #extract the receptor names which were stored as root level keys....dumb
features = list(old_fo[receptors[0]].keys()) #extract the features, just use the keys from the first receptor all are same
train_df = pd.read_csv(args.train,index_col=[0]) #expect the first column to be the idxs
test_df = pd.read_csv(args.test,index_col=[0]) #expect the first column to be the idxs
# I know this code is terrible so please save the sarcastic comments...I'm doing my rest rn @ 12:53am
# create the train split
new_fo.create_group("train")
for receptor in receptors:
new_fo['train'].create_group(receptor)
# build up a data_frame or numpy array with all of the feature information for a given receptor, then store these as datasets in the new fo
receptor_df = train_df[train_df['0'] == receptor]
drugIDs = receptor_df['1']
temp_df_list = []
for feature in features:
feature_list = []
for drug in drugIDs:
idx, _ = np.where(np.asarray(old_fo[receptor+"/drugID"]) == drug)
feature_i = old_fo[str(receptor)+"/"+str(feature)][int(idx)]
feature_list.append(feature_i)
new_fo['train'+'/'+str(receptor)+'/'+str(feature)] = np.asarray(feature_list)
# create the test split
new_fo.create_group("test")
for receptor in receptors:
new_fo['test'].create_group(receptor)
# build up a data_frame or numpy array with all of the feature information for a given receptor, then store these as datasets in the new fo
receptor_df = test_df[test_df['0'] == receptor]
drugIDs = receptor_df['1']
temp_df_list = []
for feature in features:
feature_list = []
for drug in drugIDs:
idx,_ = np.where(np.asarray(old_fo[receptor+"/drugID"]) == drug)
print(idx)
feature_i = old_fo[str(receptor)+"/"+str(feature)][int(idx)]
feature_list.append(feature_i)
new_fo['test'+'/'+str(receptor)+'/'+str(feature)] = np.asarray(feature_list)
new_fo.close()
old_fo.close()
| mit |
cl4rke/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
rexshihaoren/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
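# Illustrative aside: with MockImprovingEstimator(20) and train sizes
# np.linspace(2, 20, 10), the mock's score is 2 - n/20 on training data and
# n/20 otherwise, i.e. exactly the np.linspace(1.9, 1.0, 10) and
# np.linspace(0.1, 1.0, 10) curves asserted by the tests below.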
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
IDEALLab/design_embeddings_jmd_2016 | plot_metric_err_vs_size.py | 1 | 1582 | """
Plots reconstruction error vs semantic sample size
Usage: python plot_metric_err_vs_size.py
Author(s): Wei Chen ([email protected])
"""
import matplotlib.pyplot as plt
import numpy as np
plt.rc("font", size=18)
examples = ['sf_linear', 'sf_s_nonlinear', 'sf_v_nonlinear']
titles = {'glass': 'Glass',
'sf_linear': 'Superformula (linear)',
'sf_s_nonlinear': 'Superformula (slightly nonlinear)',
'sf_v_nonlinear': 'Superformula (very nonlinear)'}
n = len(examples)
x = [5, 60, 120, 180, 240, 300, 360]
for i in range(n):
plt.figure()
plt.xticks(np.arange(-50, 400, 50, dtype=np.int))
plt.xlabel('Sample size')
plt.ylabel('Reconstruction error')
plt.xlim(-40, 400)
#plt.ylim(0, 0.025)
errs = np.zeros((3,7))
for j in range(len(x)):
# Read reconstruction errors in rec_err.txt
txtfile = open('./results/'+examples[i]+'/n_samples = '+str(x[j])+
'/n_control_points = 20/semantic_dim = 2'+'/rec_err.txt', 'r')
k = 0
for line in txtfile:
errs[k, j] = float(line)
k += 1
line_pca, = plt.plot(x, errs[0], '-ob', label='PCA')
line_kpca, = plt.plot(x, errs[1], '-vg', label='Kernel PCA')
line_ae, = plt.plot(x, errs[2], '-sr', label='Autoencoder')
plt.legend(handles=[line_pca, line_kpca, line_ae], fontsize=16)
plt.title(titles[examples[i]])
fig_name = 'err_vs_size_'+examples[i]+'.png'
plt.tight_layout()
plt.savefig('./results/'+fig_name, dpi=300)
    print(fig_name + ' saved!')
| mit |
MJuddBooth/pandas | pandas/tests/io/test_pytables.py | 1 | 192388 | from contextlib import contextmanager
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
import os
import tempfile
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import (
PY35, PY36, BytesIO, is_platform_little_endian, is_platform_windows,
lrange, range, text_type, u)
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, Int64Index, MultiIndex,
RangeIndex, Series, Timestamp, bdate_range, compat, concat, date_range,
isna, timedelta_range)
import pandas.util.testing as tm
from pandas.util.testing import (
assert_frame_equal, assert_series_equal, set_timezone)
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.formats.printing import pprint_thing
from pandas.io.pytables import (
ClosedFileError, HDFStore, PossibleDataLossError, Term, read_hdf)
from pandas.io.pytables import TableIterator # noqa:E402
tables = pytest.importorskip('tables')
# TODO:
# remove when gh-24839 is fixed; this affects numpy 1.16
# and pytables 3.4.4
xfail_non_writeable = pytest.mark.xfail(
LooseVersion(np.__version__) >= LooseVersion('1.16'),
reason=('gh-25511, gh-24839. pytables needs a '
            'release beyond 3.4.4 to support numpy 1.16.x'))
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except OSError:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except IOError:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except (ValueError, KeyError):
pass
class Base(object):
@classmethod
def setup_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def teardown_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setup_method(self, method):
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def teardown_method(self, method):
pass
@pytest.mark.single
class TestHDFStore(Base):
def test_format_kwarg_in_constructor(self):
# GH 13291
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, HDFStore, path, format='table')
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
        # API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
pytest.raises(compat.FileNotFoundError,
read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
assert not store.get_storer('df').is_table
pytest.raises(ValueError, store.append, 'df2', df)
pd.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
assert store.get_storer('df').is_table
pd.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with HDFStore(path) as store:
assert not store.get_storer('df').is_table
pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
pd.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
assert store.get_storer('df4').is_table
pd.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
assert len(store) == 3
expected = {'/a', '/b', '/c'}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
assert list(store) == []
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store.info()
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
assert 'a' in store
assert 'b' in store
assert 'c' not in store
assert 'foo/bar' in store
assert '/foo/bar' in store
assert '/foo/b' not in store
assert 'bar' not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store['node())'] = tm.makeDataFrame()
assert 'node())' in store
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
assert store.root.a._v_attrs.pandas_version == '0.15.2'
assert store.root.b._v_attrs.pandas_version == '0.15.2'
assert store.root.df1._v_attrs.pandas_version == '0.15.2'
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node('df2')._v_attrs.pandas_version = None
pytest.raises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
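# helper: 'r'/'r+' require an existing file (IOError on open/to_hdf),
# 'w' truncates, 'a' appends; read_hdf rejects mode='w'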
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
pytest.raises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
pytest.raises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
pytest.raises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
pytest.raises(ValueError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
result = read_hdf(path, 'df')
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
check_default_mode()
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
pytest.raises(PossibleDataLossError, store.open, 'w')
store.close()
assert not store.is_open
# truncation ok here
store.open('w')
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
assert store.is_open
assert len(store) == 1
assert store._mode == 'r'
store.close()
assert not store.is_open
# reopen as append
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
# reopen as append (again)
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
pytest.raises(KeyError, store.get, 'b')
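# store.walk yields (path, subgroups, leaves) tuples, os.walk-style,
# skipping nodes that are not pandas objects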
@pytest.mark.parametrize('where, expected', [
('/', {
'': ({'first_group', 'second_group'}, set()),
'/first_group': (set(), {'df1', 'df2'}),
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
}),
('/second_group', {
'/second_group': ({'third_group'}, {'df3', 's1'}),
'/second_group/third_group': (set(), {'df4'}),
})
])
def test_walk(self, where, expected):
# GH10143
objs = {
'df1': pd.DataFrame([1, 2, 3]),
'df2': pd.DataFrame([4, 5, 6]),
'df3': pd.DataFrame([6, 7, 8]),
'df4': pd.DataFrame([9, 10, 11]),
's1': pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
'a1': np.array([[1, 2, 3], [4, 5, 6]]),
'tb1': np.array([(1, 2, 3), (4, 5, 6)], dtype='i,i,i'),
'tb2': np.array([(7, 8, 9), (10, 11, 12)], dtype='i,i,i')
}
with ensure_clean_store('walk_groups.hdf', mode='w') as store:
store.put('/first_group/df1', objs['df1'])
store.put('/first_group/df2', objs['df2'])
store.put('/second_group/df3', objs['df3'])
store.put('/second_group/s1', objs['s1'])
store.put('/second_group/third_group/df4', objs['df4'])
# Create non-pandas objects
store._handle.create_array('/first_group', 'a1', objs['a1'])
store._handle.create_table('/first_group', 'tb1', obj=objs['tb1'])
store._handle.create_table('/second_group', 'tb2', obj=objs['tb2'])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = '/'.join([path, leaf])
obj = store.get(frame_path)
if 'df' in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
pytest.raises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
pytest.raises(AttributeError, getattr, store, x)
# the underscore-prefixed versions are real private attributes, not stored objects
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
pytest.raises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# pytest.raises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
@td.skip_if_windows_python_3
def test_put_compression_blosc(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
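# GH15943: complevel alone should fall back to complib='zlib', while
# complib alone (or neither) should leave compression disabled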
def test_complibs_default_settings(self):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complevel=9)
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'zlib'
# Set complib and check to see if compression is disabled
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complib='zlib')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(self.path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append('dfc', df, complevel=9, complib='blosc')
store.append('df', df)
store.close()
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'blosc'
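# GH14478: sweep every available (complib, complevel) pair and verify the
# filters recorded on the leaves match what was requested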
def test_complibs(self):
# GH14478
df = tm.makeDataFrame()
# Build the list of all (complib, complevel) tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version('lzo'):
all_complibs.remove('lzo')
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(self.path) as tmpfile:
gname = 'foo'
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode='r')
for node in h5table.walk_nodes(where='/' + gname,
classname='Leaf'):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
@xfail_non_writeable
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self):
with ensure_clean_store(self.path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.loc[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.randint(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.randint(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.randint(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
assert result.name is None
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
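# helper: build a frame with the given index factory and round-trip it
# through the requested format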
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only supported for the fixed format (and with a PerformanceWarning)
pytest.raises(TypeError, check, 'table', index)
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
check('fixed', index)
@pytest.mark.skipif(not is_platform_little_endian(),
reason="reason platform is not little endian")
def test_encoding(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
pytest.skip("[unicode] is not implemented as a table column")
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pd.Series(val, dtype=dtype))
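# helper: round-trip a series through a table with the given encoding and
# nan_rep, then compare after mapping nan_rep back to NaN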
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
assert_series_equal(s_nan, retr, check_dtype=False,
check_categorical=False)
else:
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.loc[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.loc[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
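# dropna=True should drop rows that are entirely NaN, but only when no
# string/datetime columns force those rows to be written anyway; the
# io.hdf.dropna_table option controls the default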
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pd.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pd.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure the default is not to drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.iloc[:, :2], axes=['columns'])
store.append('df1', df.iloc[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select('df1',
'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
pytest.raises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
pytest.raises(ValueError, store.append, 'df', df)
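# string columns: the itemsize is fixed by the first append (or by
# min_itemsize); later appends with longer strings must fit or raise ValueError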
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
pytest.raises(ValueError, store.append, 'df_new', df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index('C')
store.append('ss', df['B'], min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss'), df['B'])
# same as above, with data_columns=True
store.append('ss2', df['B'], data_columns=True,
min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss2'), df['B'])
# min_itemsize in index without appending (GH 10381)
store.put('ss3', df, format='table',
min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
store.append('ss3', df2)
tm.assert_frame_equal(store.select('ss3'),
pd.concat([df, df2]))
# same as above, with a Series
store.put('ss4', df['B'], format='table',
min_itemsize={'index': 6})
store.append('ss4', df2['B'])
tm.assert_series_equal(store.select('ss4'),
pd.concat([df['B'], df2['B']]))
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.loc[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.loc[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['A']
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['B', 'A']
# min_itemsize keyed on 'values' sizes both the data column and the values block
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
assert store.get_storer('df').data_columns == ['B']
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
pytest.raises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
def test_append_with_empty_string(self):
with ensure_clean_store(self.path) as store:
# with all empty strings (GH 12242)
df = DataFrame({'x': ['a', 'b', 'c', 'd', 'e', 'f', '']})
store.append('df', df[:-1], min_itemsize={'x': 1})
store.append('df', df[-1:], min_itemsize={'x': 1})
tm.assert_frame_equal(store.select('df'), df)
def test_to_hdf_with_min_itemsize(self):
with ensure_clean_path(self.path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index('C')
df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
df2.to_hdf(path, 'ss3', append=True, format='table')
tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
pd.concat([df, df2]))
# same as above, with a Series
df['B'].to_hdf(path, 'ss4', format='table',
min_itemsize={'index': 6})
df2['B'].to_hdf(path, 'ss4', append=True, format='table')
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
@pytest.mark.parametrize(
"format",
[pytest.param('fixed', marks=xfail_non_writeable),
'table'])
def test_to_hdf_errors(self, format):
data = ['\ud800foo']
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(self.path) as path:
# GH 20835
ser.to_hdf(path, 'table', format=format, errors='surrogatepass')
result = pd.read_hdf(path, 'table', errors='surrogatepass')
tm.assert_series_equal(result, ser)
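# data_columns become individually stored, indexed columns that can be used
# directly in where/select expressions; min_itemsize may target them by name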
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc('B')] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
df_new['string'] = 'foo'
sl = df_new.columns.get_loc('string')
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = 'bar'
df_new['string2'] = 'foo'
sl = df_new.columns.get_loc('string2')
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df',
"string='foo' and string2='foo'"
" and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
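# data columns get a PyTables index by default; passing index=[...] limits
# which columns are indexed, and indexing a fixed-format node raises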
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'],
data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
pytest.raises(TypeError, store.create_table_index, 'f2')
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
pytest.raises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
pytest.raises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
pytest.raises(ValueError, store.append, 'df', df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=['a', 'b'],
index=make_index(['date', 'date', 'date']))
pytest.raises(ValueError, store.append, 'df', df)
# fully named
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_mi_data_columns(self):
# GH 14435
idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
range(5)], names=['date', 'id'])
df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=True)
actual = store.select('df', where='id == 1')
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
pytest.raises(TypeError, store.select, 'df', columns=['A'])
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
@xfail_non_writeable
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
pytest.raises(KeyError, store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
pytest.raises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# directly ndarray
pytest.raises(TypeError, store.append, 'df', np.arange(10))
# series directly
pytest.raises(TypeError, store.append,
'df', Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
pytest.raises(ValueError, store.append, 'df', df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
pytest.raises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
pytest.raises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them, though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame({c: Series(np.random.randint(5), dtype=c)
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']})
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
dtypes = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
dtypes.append(('unicode', u('\\u03c3')))
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
pytest.raises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
@xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion('1.15.0'),
reason=("Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"))
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self):
# GH 17618
time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='fixed')
recons = store['frame']
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
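# GH 3577: timedelta64 columns round-trip through both table and fixed
# formats and can be filtered with Timedelta-style strings in where=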
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.loc[3:5, 'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
result = store.select('df', where="C<pd.Timedelta('-3D')")
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
assert len(store) == 1
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
assert len(store) == 0
# nonexistence
pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
assert len(store) == 1
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
assert len(store) == 1
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
assert len(store) == 0
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[0:4, 'string'] = 'bar'
store.put('df', df, format='table')
# some invalid terms
pytest.raises(TypeError, Term)
# more invalid
pytest.raises(
ValueError, store.select, 'df', 'df.index[3]')
pytest.raises(SyntaxError, store.select, 'df', 'index>')
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
pytest.raises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
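# names used inside a where expression are resolved against the caller's
# scope, so `datetime` works whether it refers to the module or the class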
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s.iloc[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.iloc[3:5, 1:3] = np.nan
s.iloc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
@xfail_non_writeable
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal)
@xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
pytest.skip('known failure on some windows platforms')
@xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_frame(self, compression):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=compression)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=compression)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=compression)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
@xfail_non_writeable
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
@xfail_non_writeable
@pytest.mark.parametrize(
'dtype', [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]'])
def test_empty_series(self, dtype):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self):
# GH 13884
df = pd.DataFrame({'A': [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize('UTC')
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
recons = store['frame']
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize('table_format', ['table', 'fixed'])
def test_store_index_name_numpy_str(self, table_format):
# GH #13492
idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),
datetime.date(2000, 1, 2)]),
name=u('cols\u05d2'))
idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),
datetime.date(2010, 1, 2)]),
name=u('rows\u05d0'))
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format=table_format)
df2 = read_hdf(path, 'df')
assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == text_type
assert type(df2.columns.name) == text_type
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
tm.assert_series_equal(recons, series)
@xfail_non_writeable
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_store_mixed(self, compression):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=compression)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
arr = np.random.binomial(n=1, p=.01, size=(1000, 10))
df = DataFrame(arr).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1000)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed
# (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
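# The selections above rely on HDFStore's query model: only columns written as
# ``data_columns`` can appear in a row-wise ``where`` clause, while
# ``columns=...`` is a plain column filter. A minimal sketch of the pattern
# exercised here (``path`` is assumed to be a throwaway file):
#     with pd.HDFStore(path) as store:
#         store.append('df', df, data_columns=['A'])
#         result = store.select('df', where='A > 0', columns=['A', 'B'])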
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.loc[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', 'boolv == %s' % str(v),
columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select(
'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position: float with NaN is ok too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
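# Note for the test above: a ``where`` string may reference local Python
# variables (here ``selector``) directly, and list-valued conditions such as
# ``users=['a','b','c']`` act like an ``isin`` filter on that data column.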
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
pytest.raises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
pytest.raises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
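# Iteration notes: passing ``iterator=True`` or ``chunksize`` to
# ``store.select``/``read_hdf`` yields DataFrame chunks from a TableIterator,
# and only ``format='table'`` data supports it -- reading a fixed-format node
# that way raises TypeError, as checked above.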
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be a single chunk (the buggy behaviour returned 10)
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom, this should return []; for
# example, ``for e in []: print(True)`` never prints True.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
assert len(results) == 0
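# Summary of the GH 8014 behaviour exercised above: the ``where`` clause is
# resolved to row coordinates before chunking is applied, so a restrictive
# range yields a single chunk and an empty selection yields no chunks at all.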
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
assert (getattr(getattr(df, idx), attr, None) ==
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
assert store.get_storer('data').info['index']['freq'] is None
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
@pytest.mark.filterwarnings(
"ignore:\\nthe :pandas.io.pytables.AttributeConflictWarning"
)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
assert read_hdf(path, 'data').index.name == 'foo'
with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
assert read_hdf(path, 'data').index.name is None
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
assert crit1.env.scope['date'] == date
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.loc[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.loc[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
pytest.raises(
ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
# pytest.raises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
pytest.raises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
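# The complex-criteria selections above use numexpr-backed terms: ``&``, ``|``
# and ``~`` combine conditions, but negating a data-column comparison such as
# ``~(string="bar")`` is not implemented, whereas inverting a pure column
# filter like ``~(columns=['A','B'])`` works.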
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
# scope with list-like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.loc[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
pprint_thing("[{0}]".format(detail))
pprint_thing(store)
pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', 'x!=none')
expected = df2[isna(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.loc[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# GH 17912
# HDFStore.select_column should raise a KeyError
# exception if the key is not a valid store
with pytest.raises(KeyError,
match='No object named df in the file'):
store.select_column('df', 'index')
store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
pytest.raises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
pytest.raises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.loc[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
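# ``select_column`` reads a single indexable column back as a Series without
# materialising the whole table; ``start``/``stop`` slice it by row number,
# and only data-indexable columns (not internal ``values_block_*`` blocks) are
# valid targets, as asserted above.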
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
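# Coordinate-based selection, as exercised above: ``select_as_coordinates``
# returns an integer Index of matching row positions, and that Index (or a
# boolean mask / explicit list of row locations of the right length) can be
# passed back to ``select`` via ``where=``, including against another table
# with the same number of rows.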
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
pytest.raises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
pytest.raises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
pytest.raises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
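# ``append_to_multiple`` splits one wide frame across several tables: the dict
# maps each table name to the columns it should hold (``None`` meaning all
# remaining columns), and ``selector`` names the table whose data columns the
# subsequent ``select_as_multiple`` where-clause is evaluated against.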
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
@pytest.mark.xfail(run=False,
reason="append_to_multiple_dropna_false "
"is not raising as failed")
def test_append_to_multiple_dropna_false(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
with pytest.raises(ValueError):
store.select_as_multiple(['df1a', 'df2a'])
assert not store.select('df1a').index.equals(
store.select('df2a').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
pytest.raises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for differing numbers of rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
pytest.raises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
def test_start_stop_table(self):
with ensure_clean_store(self.path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self):
# GH 16209
with ensure_clean_store(self.path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple({'selector': ['foo'], 'data': None}, df,
selector='selector')
result = store.select_as_multiple(['selector', 'data'],
selector='selector', start=0,
stop=1)
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
# fixed, GH 8287
df = DataFrame(dict(A=np.random.rand(20),
B=np.random.rand(20)),
index=pd.date_range('20130101', periods=20))
store.put('df', df)
result = store.select(
'df', start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select(
'df', start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put('s', s)
result = store.select('s', start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select('s', start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
dfs = df.to_sparse()
store.put('dfs', dfs)
with pytest.raises(NotImplementedError):
store.select('dfs', start=0, stop=5)
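# ``start``/``stop`` row slicing works for both table and fixed formats of
# frames and series, as shown above; sparse containers stored via ``put`` do
# not support it and raise NotImplementedError.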
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
'b': Series(['ab', 'cd', 'ab'])})
with ensure_clean_store(self.path) as store:
store.append('test_dataset', df)
result = store.select('test_dataset', start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
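# The helpers below factor out the write/read-back pattern used throughout
# this class: each writes ``obj`` into a fresh store (optionally with the
# default compression library), reads it back, and hands both objects to the
# supplied comparator; the "double" variant round-trips the retrieved object
# a second time.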
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
comparator(retrieved, obj)
def test_multiple_open_close(self):
# gh-4409: open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
assert 'CLOSED' not in store.info()
assert store.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
pytest.raises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert 'CLOSED' not in store1.info()
assert 'CLOSED' not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert 'CLOSED' in store1.info()
assert not store1.is_open
assert 'CLOSED' not in store2.info()
assert store2.is_open
store2.close()
assert 'CLOSED' in store1.info()
assert 'CLOSED' in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
pytest.raises(ClosedFileError, store.keys)
pytest.raises(ClosedFileError, lambda: 'df' in store)
pytest.raises(ClosedFileError, lambda: len(store))
pytest.raises(ClosedFileError, lambda: store['df'])
pytest.raises(AttributeError, lambda: store.df)
pytest.raises(ClosedFileError, store.select, 'df')
pytest.raises(ClosedFileError, store.get, 'df')
pytest.raises(ClosedFileError, store.append, 'df2', df)
pytest.raises(ClosedFileError, store.put, 'df3', df)
pytest.raises(ClosedFileError, store.get_storer, 'df2')
pytest.raises(ClosedFileError, store.remove, 'df2')
with pytest.raises(ClosedFileError, match='file is not open'):
store.select('df')
def test_pytables_native_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(PY35 and is_platform_windows(),
reason="native2 read fails oddly on windows / 3.5")
def test_pytables_native2_read(self, datapath):
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
@xfail_non_writeable
def test_legacy_table_fixed_format_read_py2(self, datapath):
# GH 24510
# legacy table with fixed format written in Python 2
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'legacy_table_fixed_py2.h5'),
mode='r') as store:
result = store.select('df')
expected = pd.DataFrame([[1, 2, 3, 'D']],
columns=['A', 'B', 'C', 'D'],
index=pd.Index(['ABC'],
name='INDEX_NAME'))
assert_frame_equal(expected, result)
def test_legacy_table_read_py2(self, datapath):
# issue: 24925
# legacy table written in Python 2
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'legacy_table_py2.h5'),
mode='r') as store:
result = store.select('table')
expected = pd.DataFrame({
"a": ["a", "b"],
"b": [2, 3]
})
assert_frame_equal(expected, result)
def test_copy(self):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except (OSError, ValueError):
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
assert store['a'].index[0] == dt
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
@xfail_non_writeable
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# pytest.raises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
with ensure_clean_store(self.path) as store:
# Basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, 'si')
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
_maybe_remove(store, 'si2')
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, 'df2')
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert '/df2 ' in info
# assert '/df2/meta/values_block_0/meta' in info
assert '/df2/meta/values_block_1/meta' in info
# unordered
_maybe_remove(store, 's2')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, 'df3')
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append('df3', df3)
# Remove, and make sure the metadata is removed as well (it's a
# recursive removal, so it should be).
result = store.select('df3/meta/s/meta')
assert result is not None
store.remove('df3')
with pytest.raises(KeyError):
store.select('df3/meta/s/meta')
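# The categorical round-trips above depend on the categories being stored as
# table metadata under ``<key>/meta/...``; that is why appends require
# compatible categories and why removing the table also removes its metadata.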
def test_categorical_conversion(self):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# the where criteria are not met.
obsids = ['ESP_012345_6789', 'ESP_987654_3210']
imgids = ['APF00006np', 'APF0001imm']
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype('category')
df.imgids = df.imgids.astype('category')
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
df = pd.DataFrame({
'a': ['a', 'b', 'c', np.nan],
'b': [np.nan, np.nan, np.nan, np.nan],
'c': [1, 2, 3, 4],
'd': pd.Series([None] * 4, dtype=object)
})
df['a'] = df.a.astype('category')
df['b'] = df.b.astype('category')
df['d'] = df.d.astype('category')
expected = df
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df')
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_columns_multiindex_modified(self):
# BUG: 7212
# read_hdf / store.select modified the passed ``columns`` parameter
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
assert cols2load_original == cols2load
@ignore_natural_naming_warning
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
# TODO: Add back to types_should_fail
# https://github.com/pandas-dev/pandas/issues/20907
pass
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
msg = "cannot have non-object label DataIndexableCol"
with pytest.raises(ValueError, match=msg):
df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
        # No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
pytest.raises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
pytest.raises(IOError, read_hdf, store, 'df')
def test_read_hdf_generic_buffer_errors(self):
pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, 'df', complib='foolib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_table(self):
# GH13231
df = DataFrame({'i': range(5),
'c': Series(list('abacd'), dtype='category')})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a', format='table')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a', format='table')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_empty(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path)
store.close()
pytest.raises(ValueError, read_hdf, path)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self):
# GH11773
from pathlib import Path
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self):
# GH 14241
df = pd.DataFrame({'A': [1000000000.0009,
1000000000.0011,
1000000000.0015]})
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select('test', 'A == %.4f' % exact)
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self):
# GH 15492
df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],
'real_date': date_range('2014-01-01', periods=2),
'float': [1.1, 1.2],
'int': [1, 2]},
columns=['date', 'real_date', 'float', 'int'])
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ['<', '>', '==']:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp('2014-01-01'),
pd.Timedelta(1, 's')]:
query = 'date {op} v'.format(op=op)
with pytest.raises(TypeError):
store.select('test', where=query)
# strings to other columns must be convertible to type
v = 'a'
for col in ['int', 'float', 'real_date']:
query = '{col} {op} v'.format(op=op, col=col)
with pytest.raises(ValueError):
store.select('test', where=query)
for v, col in zip(['1', '1.1', '2014-01-01'],
['int', 'float', 'real_date']):
query = '{col} {op} v'.format(op=op, col=col)
result = store.select('test', where=query)
if op == '==':
expected = df.loc[[0], :]
elif op == '>':
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize('format', ['fixed', 'table'])
def test_read_hdf_series_mode_r(self, format):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(self.path) as path:
series.to_hdf(path, key='data', format=format)
result = pd.read_hdf(path, key='data', mode='r')
tm.assert_series_equal(result, series)
@pytest.mark.skipif(not PY36, reason="Need python 3.6")
def test_fspath(self):
with tm.ensure_clean('foo.h5') as path:
with pd.HDFStore(path) as store:
assert os.fspath(store) == str(path)
def test_read_py2_hdf_file_in_py3(self, datapath):
# GH 16781
# tests reading a PeriodIndex DataFrame written in Python2 in Python3
# the file was generated in Python 2.7 like so:
#
# df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
# ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
# df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(
['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf',
'periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),
mode='r') as store:
result = store['p']
assert_frame_equal(result, expected)
class TestHDFComplexValues(Base):
# GH10447
def test_complex_fixed(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
@xfail_non_writeable
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
with catch_warnings(record=True):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
objs = [s, df]
comps = [tm.assert_series_equal, tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
with catch_warnings(record=True):
objs = [df]
comps = [tm.assert_frame_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
pytest.raises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
assert_frame_equal(pd.concat([df, df], 0), result)
class TestTimezones(Base):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
from pandas._libs.tslibs.timezones import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(self.path) as store:
with set_timezone('EST5EDT'):
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
with set_timezone('CST6CDT'):
result = store['obj1']
assert_frame_equal(result, df)
def test_legacy_datetimetz_object(self, datapath):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
datapath('io', 'data', 'legacy_hdf', 'datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
        # make sure we are not failing on transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
| bsd-3-clause |
aflaxman/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
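# Hedged usage sketch for the helper above (added for illustration, using the
# ``metrics`` module imported earlier; ``n_runs`` defaults to 5, so the result
# has shape ``(len(n_clusters_range), n_runs)``):
#
#     >>> scores = uniform_labelings_scores(metrics.adjusted_rand_score,
#     ...                                   n_samples=100,
#     ...                                   n_clusters_range=[10, 20])
#     >>> scores.shape
#     (2, 5)
#
# For an adjusted measure the entries hover around 0.0; for a non-adjusted
# measure they grow with the number of clusters, which is what the plots
# below demonstrate.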
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/tseries/converter.py | 2 | 32615 | from datetime import datetime, timedelta
import datetime as pydt
import numpy as np
from dateutil.relativedelta import relativedelta
import matplotlib.units as units
import matplotlib.dates as dates
from matplotlib.ticker import Formatter, AutoLocator, Locator
from matplotlib.transforms import nonsingular
from pandas.compat import lrange
import pandas.compat as compat
import pandas.lib as lib
import pandas.core.common as com
from pandas.core.index import Index
from pandas.core.series import Series
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import FreqGroup
from pandas.tseries.period import Period, PeriodIndex
def register():
units.registry[lib.Timestamp] = DatetimeConverter()
units.registry[Period] = PeriodConverter()
units.registry[pydt.datetime] = DatetimeConverter()
units.registry[pydt.date] = DatetimeConverter()
units.registry[pydt.time] = TimeConverter()
units.registry[np.datetime64] = DatetimeConverter()
def _to_ordinalf(tm):
tot_sec = (tm.hour * 3600 + tm.minute * 60 + tm.second +
float(tm.microsecond / 1e6))
return tot_sec
def time2num(d):
if isinstance(d, compat.string_types):
parsed = tools.to_datetime(d)
if not isinstance(parsed, datetime):
raise ValueError('Could not parse time %s' % d)
return _to_ordinalf(parsed.time())
if isinstance(d, pydt.time):
return _to_ordinalf(d)
return d
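# Illustrative sketch (added for clarity; not part of the original module):
# ``time2num`` maps a ``datetime.time`` -- or a string parseable by
# ``to_datetime`` -- to seconds past midnight, which is the unit that
# ``TimeConverter`` and ``TimeFormatter`` below work in.
#
#     >>> import datetime as pydt
#     >>> time2num(pydt.time(1, 30, 15))
#     5415.0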
class TimeConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
valid_types = (str, pydt.time)
if (isinstance(value, valid_types) or com.is_integer(value) or
com.is_float(value)):
return time2num(value)
if isinstance(value, Index):
return value.map(time2num)
if isinstance(value, (list, tuple, np.ndarray, Index)):
return [time2num(x) for x in value]
return value
@staticmethod
def axisinfo(unit, axis):
if unit != 'time':
return None
majloc = AutoLocator()
majfmt = TimeFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='time')
@staticmethod
def default_units(x, axis):
return 'time'
# time formatter
class TimeFormatter(Formatter):
def __init__(self, locs):
self.locs = locs
def __call__(self, x, pos=0):
fmt = '%H:%M:%S'
s = int(x)
ms = int((x - s) * 1e3)
us = int((x - s) * 1e6 - ms)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
_, h = divmod(h, 24)
if us != 0:
fmt += '.%6f'
elif ms != 0:
fmt += '.%3f'
return pydt.time(h, m, s, us).strftime(fmt)
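# Illustrative sketch (added for clarity): ``TimeFormatter`` renders those
# second counts back into tick labels; the ``locs`` argument is stored but not
# consulted by ``__call__``.
#
#     >>> TimeFormatter(locs=[])(5415)
#     '01:30:15'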
# Period Conversion
class PeriodConverter(dates.DateConverter):
@staticmethod
def convert(values, units, axis):
if not hasattr(axis, 'freq'):
raise TypeError('Axis must have `freq` set to convert to Periods')
valid_types = (compat.string_types, datetime,
Period, pydt.date, pydt.time)
if (isinstance(values, valid_types) or com.is_integer(values) or
com.is_float(values)):
return get_datevalue(values, axis.freq)
if isinstance(values, PeriodIndex):
return values.asfreq(axis.freq).values
if isinstance(values, Index):
return values.map(lambda x: get_datevalue(x, axis.freq))
if com.is_period_arraylike(values):
return PeriodIndex(values, freq=axis.freq).values
if isinstance(values, (list, tuple, np.ndarray, Index)):
return [get_datevalue(x, axis.freq) for x in values]
return values
def get_datevalue(date, freq):
if isinstance(date, Period):
return date.asfreq(freq).ordinal
elif isinstance(date, (compat.string_types, datetime,
pydt.date, pydt.time)):
return Period(date, freq).ordinal
elif (com.is_integer(date) or com.is_float(date) or
(isinstance(date, (np.ndarray, Index)) and (date.size == 1))):
return date
elif date is None:
return None
raise ValueError("Unrecognizable date '%s'" % date)
def _dt_to_float_ordinal(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if (isinstance(dt, (np.ndarray, Index, Series)) and
com.is_datetime64_ns_dtype(dt)):
base = dates.epoch2num(dt.asi8 / 1.0E9)
else:
base = dates.date2num(dt)
return base
# Datetime Conversion
class DatetimeConverter(dates.DateConverter):
@staticmethod
def convert(values, unit, axis):
def try_parse(values):
try:
return _dt_to_float_ordinal(tools.to_datetime(values))
except Exception:
return values
if isinstance(values, (datetime, pydt.date)):
return _dt_to_float_ordinal(values)
elif isinstance(values, np.datetime64):
return _dt_to_float_ordinal(lib.Timestamp(values))
elif isinstance(values, pydt.time):
return dates.date2num(values)
elif (com.is_integer(values) or com.is_float(values)):
return values
elif isinstance(values, compat.string_types):
return try_parse(values)
elif isinstance(values, (list, tuple, np.ndarray, Index)):
if isinstance(values, Index):
values = values.values
if not isinstance(values, np.ndarray):
values = com._asarray_tuplesafe(values)
if com.is_integer_dtype(values) or com.is_float_dtype(values):
return values
try:
values = tools.to_datetime(values)
if isinstance(values, Index):
values = values.map(_dt_to_float_ordinal)
else:
values = [_dt_to_float_ordinal(x) for x in values]
except Exception:
pass
return values
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = PandasAutoDateLocator(tz=tz)
majfmt = PandasAutoDateFormatter(majloc, tz=tz)
datemin = pydt.date(2000, 1, 1)
datemax = pydt.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
class PandasAutoDateFormatter(dates.AutoDateFormatter):
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
dates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt)
# matplotlib.dates._UTC has no _utcoffset called by pandas
if self._tz is dates.UTC:
self._tz._utcoffset = self._tz.utcoffset(None)
class PandasAutoDateLocator(dates.AutoDateLocator):
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
num_days = ((delta.years * 12.0) + delta.months * 31.0) + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
def _get_unit(self):
return MilliSecondLocator.get_unit_generic(self._freq)
class MilliSecondLocator(dates.DateLocator):
UNIT = 1. / (24 * 3600 * 1000)
def __init__(self, tz):
dates.DateLocator.__init__(self, tz)
self._interval = 1.
def _get_unit(self):
return self.get_unit_generic(-1)
@staticmethod
def get_unit_generic(freq):
unit = dates.RRuleLocator.get_unit_generic(freq)
if unit < 0:
return MilliSecondLocator.UNIT
return unit
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
num = (nmax - nmin) * 86400 * 1000
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
# We went through the whole loop without breaking, default to 1
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate %d '
'ticks from %s to %s: exceeds Locator.MAXTICKS'
'* 2 (%d) ') %
(estimate, dmin, dmax, self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
def _get_interval(self):
return self._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
def _from_ordinal(x, tz=None):
ix = int(x)
dt = datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24 * remainder, 1)
minute, remainder = divmod(60 * remainder, 1)
second, remainder = divmod(60 * remainder, 1)
microsecond = int(1e6 * remainder)
if microsecond < 10:
microsecond = 0 # compensate for rounding errors
dt = datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
int(second), microsecond)
if tz is not None:
dt = dt.astimezone(tz)
if microsecond > 999990: # compensate for rounding errors
dt += timedelta(microseconds=1e6 - microsecond)
return dt
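# Illustrative sketch (added for clarity): ``_from_ordinal`` inverts
# ``matplotlib.dates.date2num`` for this module's purposes -- the integer part
# selects the day via the proleptic Gregorian ordinal and the fractional part
# selects the time of day.
#
#     >>> _from_ordinal(730120.5)    # 730120 == pydt.date(2000, 1, 1).toordinal()
#     datetime.datetime(2000, 1, 1, 12, 0)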
# Fixed frequency dynamic tick locators and formatters
# -------------------------------------------------------------------------
# --- Locators ---
# -------------------------------------------------------------------------
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
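# Illustrative values for the spacing table above (added for clarity; the
# function returns ``(minor, major)`` tick spacing in years for a span of
# ``nyears``):
#
#     >>> _get_default_annual_spacing(8)
#     (1, 1)
#     >>> _get_default_annual_spacing(30)
#     (1, 5)
#     >>> _get_default_annual_spacing(2500)    # factor = 2500 // 1000 + 1 == 3
#     (60, 300)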
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1, period)
return (current - previous).nonzero()[0]
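# Illustrative sketch (added for clarity): ``period_break`` flags the positions
# at which the requested sub-period rolls over, e.g. the first period of a new
# year in a monthly index.
#
#     >>> idx = PeriodIndex(['1999-12', '2000-01', '2000-02'], freq='M')
#     >>> period_break(idx, 'year')    # doctest: +SKIP
#     array([1])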
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
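# Illustrative sketch (added for clarity): the level label is only suppressed
# when the sole candidate tick sits at position 0 while the view starts at a
# fractional limit, i.e. when that first tick would not actually be labelled.
#
#     >>> has_level_label(np.array([0]), vmin=0.5)
#     False
#     >>> has_level_label(np.array([0, 7]), vmin=0.5)
#     True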
def _daily_finder(vmin, vmax, freq):
periodsperday = -1
if freq >= FreqGroup.FR_HR:
if freq == FreqGroup.FR_NS:
periodsperday = 24 * 60 * 60 * 1000000000
elif freq == FreqGroup.FR_US:
periodsperday = 24 * 60 * 60 * 1000000
elif freq == FreqGroup.FR_MS:
periodsperday = 24 * 60 * 60 * 1000
elif freq == FreqGroup.FR_SEC:
periodsperday = 24 * 60 * 60
elif freq == FreqGroup.FR_MIN:
periodsperday = 24 * 60
elif freq == FreqGroup.FR_HR:
periodsperday = 24
else: # pragma: no cover
raise ValueError("unexpected frequency: %s" % freq)
periodsperyear = 365 * periodsperday
periodspermonth = 28 * periodsperday
elif freq == FreqGroup.FR_BUS:
periodsperyear = 261
periodspermonth = 19
elif freq == FreqGroup.FR_DAY:
periodsperyear = 365
periodspermonth = 28
elif frequencies.get_freq_group(freq) == FreqGroup.FR_WK:
periodsperyear = 52
periodspermonth = 3
else: # pragma: no cover
raise ValueError("unexpected frequency")
# save this for later usage
vmin_orig = vmin
(vmin, vmax) = (Period(ordinal=int(vmin), freq=freq),
Period(ordinal=int(vmax), freq=freq))
span = vmax.ordinal - vmin.ordinal + 1
dates_ = PeriodIndex(start=vmin, end=vmax, freq=freq)
# Initialize the output
info = np.zeros(span,
dtype=[('val', np.int64), ('maj', bool),
('min', bool), ('fmt', '|S20')])
info['val'][:] = dates_.values
info['fmt'][:] = ''
info['maj'][[0, -1]] = True
# .. and set some shortcuts
info_maj = info['maj']
info_min = info['min']
info_fmt = info['fmt']
def first_label(label_flags):
if (label_flags[0] == 0) and (label_flags.size > 1) and \
((vmin_orig % 1) > 0.0):
return label_flags[1]
else:
return label_flags[0]
# Case 1. Less than a month
if span <= periodspermonth:
day_start = period_break(dates_, 'day')
month_start = period_break(dates_, 'month')
def _hour_finder(label_interval, force_year_start):
_hour = dates_.hour
_prev_hour = (dates_ - 1).hour
hour_start = (_hour - _prev_hour) != 0
info_maj[day_start] = True
info_min[hour_start & (_hour % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt[hour_start & (_hour % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
if force_year_start and not has_level_label(year_start, vmin_orig):
info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y'
def _minute_finder(label_interval):
hour_start = period_break(dates_, 'hour')
_minute = dates_.minute
_prev_minute = (dates_ - 1).minute
minute_start = (_minute - _prev_minute) != 0
info_maj[hour_start] = True
info_min[minute_start & (_minute % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[minute_start & (_minute % label_interval == 0)] = '%H:%M'
info_fmt[day_start] = '%H:%M\n%d-%b'
info_fmt[year_start] = '%H:%M\n%d-%b\n%Y'
def _second_finder(label_interval):
minute_start = period_break(dates_, 'minute')
_second = dates_.second
_prev_second = (dates_ - 1).second
second_start = (_second - _prev_second) != 0
info['maj'][minute_start] = True
info['min'][second_start & (_second % label_interval == 0)] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[second_start & (_second %
label_interval == 0)] = '%H:%M:%S'
info_fmt[day_start] = '%H:%M:%S\n%d-%b'
info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y'
if span < periodsperday / 12000.0:
_second_finder(1)
elif span < periodsperday / 6000.0:
_second_finder(2)
elif span < periodsperday / 2400.0:
_second_finder(5)
elif span < periodsperday / 1200.0:
_second_finder(10)
elif span < periodsperday / 800.0:
_second_finder(15)
elif span < periodsperday / 400.0:
_second_finder(30)
elif span < periodsperday / 150.0:
_minute_finder(1)
elif span < periodsperday / 70.0:
_minute_finder(2)
elif span < periodsperday / 24.0:
_minute_finder(5)
elif span < periodsperday / 12.0:
_minute_finder(15)
elif span < periodsperday / 6.0:
_minute_finder(30)
elif span < periodsperday / 2.5:
_hour_finder(1, False)
elif span < periodsperday / 1.5:
_hour_finder(2, False)
elif span < periodsperday * 1.25:
_hour_finder(3, False)
elif span < periodsperday * 2.5:
_hour_finder(6, True)
elif span < periodsperday * 4:
_hour_finder(12, True)
else:
info_maj[month_start] = True
info_min[day_start] = True
year_start = period_break(dates_, 'year')
info_fmt = info['fmt']
info_fmt[day_start] = '%d'
info_fmt[month_start] = '%d\n%b'
info_fmt[year_start] = '%d\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(day_start)] = '%d\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '%d\n%b\n%Y'
# Case 2. Less than three months
elif span <= periodsperyear // 4:
month_start = period_break(dates_, 'month')
info_maj[month_start] = True
if freq < FreqGroup.FR_HR:
info['min'] = True
else:
day_start = period_break(dates_, 'day')
info['min'][day_start] = True
week_start = period_break(dates_, 'week')
year_start = period_break(dates_, 'year')
info_fmt[week_start] = '%d'
info_fmt[month_start] = '\n\n%b'
info_fmt[year_start] = '\n\n%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if not has_level_label(month_start, vmin_orig):
info_fmt[first_label(week_start)] = '\n\n%b\n%Y'
else:
info_fmt[first_label(month_start)] = '\n\n%b\n%Y'
# Case 3. Less than 14 months ...............
elif span <= 1.15 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
week_start = period_break(dates_, 'week')
info_maj[month_start] = True
info_min[week_start] = True
info_min[year_start] = False
info_min[month_start] = False
info_fmt[month_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
info_fmt[first_label(month_start)] = '%b\n%Y'
# Case 4. Less than 2.5 years ...............
elif span <= 2.5 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
month_start = period_break(dates_, 'month')
info_maj[quarter_start] = True
info_min[month_start] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 5. Less than 4 years .................
elif span <= 4 * periodsperyear:
year_start = period_break(dates_, 'year')
month_start = period_break(dates_, 'month')
info_maj[year_start] = True
info_min[month_start] = True
info_min[year_start] = False
month_break = dates_[month_start].month
jan_or_jul = month_start[(month_break == 1) | (month_break == 7)]
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
    # Case 6. Less than 11 years ................
elif span <= 11 * periodsperyear:
year_start = period_break(dates_, 'year')
quarter_start = period_break(dates_, 'quarter')
info_maj[year_start] = True
info_min[quarter_start] = True
info_min[year_start] = False
info_fmt[year_start] = '%Y'
    # Case 7. More than 11 years ................
else:
year_start = period_break(dates_, 'year')
year_break = dates_[year_start].year
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(year_break % maj_anndef == 0)]
info_maj[major_idx] = True
minor_idx = year_start[(year_break % min_anndef == 0)]
info_min[minor_idx] = True
info_fmt[major_idx] = '%Y'
return info
def _monthly_finder(vmin, vmax, freq):
periodsperyear = 12
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
# Initialize the output
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
dates_ = info['val']
info['fmt'] = ''
year_start = (dates_ % 12 == 0).nonzero()[0]
info_maj = info['maj']
info_fmt = info['fmt']
if span <= 1.15 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = '%b'
info_fmt[year_start] = '%b\n%Y'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = '%b\n%Y'
elif span <= 2.5 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
# TODO: Check the following : is it really info['fmt'] ?
info['fmt'][quarter_start] = True
info['min'] = True
info_fmt[quarter_start] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 4 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6)
info_fmt[jan_or_jul] = '%b'
info_fmt[year_start] = '%b\n%Y'
elif span <= 11 * periodsperyear:
quarter_start = (dates_ % 3 == 0).nonzero()
info_maj[year_start] = True
info['min'][quarter_start] = True
info_fmt[year_start] = '%Y'
else:
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
years = dates_[year_start] // 12 + 1
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%Y'
return info
def _quarterly_finder(vmin, vmax, freq):
periodsperyear = 4
vmin_orig = vmin
(vmin, vmax) = (int(vmin), int(vmax))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
info_maj = info['maj']
info_fmt = info['fmt']
year_start = (dates_ % 4 == 0).nonzero()[0]
if span <= 3.5 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[:] = 'Q%q'
info_fmt[year_start] = 'Q%q\n%F'
if not has_level_label(year_start, vmin_orig):
if dates_.size > 1:
idx = 1
else:
idx = 0
info_fmt[idx] = 'Q%q\n%F'
elif span <= 11 * periodsperyear:
info_maj[year_start] = True
info['min'] = True
info_fmt[year_start] = '%F'
else:
years = dates_[year_start] // 4 + 1
nyears = span / periodsperyear
(min_anndef, maj_anndef) = _get_default_annual_spacing(nyears)
major_idx = year_start[(years % maj_anndef == 0)]
info_maj[major_idx] = True
info['min'][year_start[(years % min_anndef == 0)]] = True
info_fmt[major_idx] = '%F'
return info
def _annual_finder(vmin, vmax, freq):
(vmin, vmax) = (int(vmin), int(vmax + 1))
span = vmax - vmin + 1
info = np.zeros(span,
dtype=[('val', int), ('maj', bool), ('min', bool),
('fmt', '|S8')])
info['val'] = np.arange(vmin, vmax + 1)
info['fmt'] = ''
dates_ = info['val']
(min_anndef, maj_anndef) = _get_default_annual_spacing(span)
major_idx = dates_ % maj_anndef == 0
info['maj'][major_idx] = True
info['min'][(dates_ % min_anndef == 0)] = True
info['fmt'][major_idx] = '%Y'
return info
def get_finder(freq):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
fgroup = frequencies.get_freq_group(freq)
if fgroup == FreqGroup.FR_ANN:
return _annual_finder
elif fgroup == FreqGroup.FR_QTR:
return _quarterly_finder
elif freq == FreqGroup.FR_MTH:
return _monthly_finder
elif ((freq >= FreqGroup.FR_BUS) or fgroup == FreqGroup.FR_WK):
return _daily_finder
else: # pragma: no cover
errmsg = "Unsupported frequency: %s" % (freq)
raise NotImplementedError(errmsg)
class TimeSeries_DateLocator(Locator):
"""
Locates the ticks along an axis controlled by a :class:`Series`.
Parameters
----------
freq : {var}
Valid frequency specifier.
minor_locator : {False, True}, optional
Whether the locator is for minor ticks (True) or not.
dynamic_mode : {True, False}, optional
Whether the locator should work in dynamic mode.
base : {int}, optional
quarter : {int}, optional
month : {int}, optional
day : {int}, optional
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
base=1, quarter=1, month=1, day=1, plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.freq = freq
self.base = base
(self.quarter, self.month, self.day) = (quarter, month, day)
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
def __call__(self):
'Return the locations of the ticks.'
# axis calls Locator.set_axis inside set_m<xxxx>_formatter
vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
vmin, vmax = vi
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.isdynamic:
locs = self._get_default_locs(vmin, vmax)
else: # pragma: no cover
base = self.base
(d, m) = divmod(vmin, base)
vmin = (d + 1) * base
locs = lrange(vmin, vmax + 1, base)
return locs
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
# -------------------------------------------------------------------------
# --- Formatter ---
# -------------------------------------------------------------------------
class TimeSeries_DateFormatter(Formatter):
"""
Formats the ticks along an axis controlled by a :class:`PeriodIndex`.
Parameters
----------
freq : {int, string}
Valid frequency specifier.
minor_locator : {False, True}
Whether the current formatter should apply to minor ticks (True) or
major ticks (False).
dynamic_mode : {True, False}
Whether the formatter works in dynamic mode or not.
"""
def __init__(self, freq, minor_locator=False, dynamic_mode=True,
plot_obj=None):
if isinstance(freq, compat.string_types):
freq = frequencies.get_freq(freq)
self.format = None
self.freq = freq
self.locs = []
self.formatdict = None
self.isminor = minor_locator
self.isdynamic = dynamic_mode
self.offset = 0
self.plot_obj = plot_obj
self.finder = get_finder(freq)
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = dict([(x, f) for (x, _, _, f) in format])
return self.formatdict
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
def __call__(self, x, pos=0):
if self.formatdict is None:
return ''
else:
fmt = self.formatdict.pop(x, '')
return Period(ordinal=int(x), freq=self.freq).strftime(fmt)
| mit |
hallfox/Cryme | app.py | 2 | 3971 | # Flask
from flask import Flask, render_template, request, redirect, url_for, session, abort, make_response
# MongoDB and Sessions
from flask.ext.session import Session
from pymongo import MongoClient
#ML Libs
import numpy as np
from sklearn.svm import SVC
from sklearn.externals import joblib
from sklearn.preprocessing import scale
# Miscellaneous
import os, logging, json, datetime
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
app = Flask(__name__)
# MongoDB Setup
client = MongoClient(os.getenv('MONGOHQ_URL'))
db = client.bc15
# MongoDB Session Setup
SESSION_TYPE = "mongodb"
SESSION_MONGODB = client
SESSION_MONGODB_DB = "bc15"
SESSION_MONGODB_COLLECT = "sessions"
app.secret_key = '\xcbC\xb0\x0b\xe6\xe7\xc2\xd3u\xbf\xff\x9b\xfb\xb8\xb1\xb9^Y\xdbQ\xba\x7f\xabl'
app.config.from_object(__name__)
Session(app)
@app.before_first_request
def setup_logging():
if not app.debug:
# In production mode, add log handler to sys.stderr.
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.route('/')
def index():
return render_template('index.html', template_folder=tmpl_dir)
@app.route('/dashboard')
def dashboard():
return render_template('dashboard.html', template_folder=tmpl_dir)
@app.route('/history')
def history():
_id = "session:" + str(session.sid)
data = db.history.find_one({ "id" : _id })
if data != None:
return render_template('history.html', data=data, template_folder=tmpl_dir)
return render_template('error.html', template_folder=tmpl_dir, error_msg="No Search History Found",
return_home="You have to search the map before you can see your search history."
)
@app.route('/clear')
def clear_history():
_id = "session:" + str(session.sid)
data = db.history.remove({ "id" : _id })
return redirect(url_for('history'))
@app.route('/predict', methods=["POST"])
def predict():
_id = "session:" + str(session.sid)
hours, minutes = [int(i) for i in request.form["timepicker"].split(':')]
time = (60 * hours) + minutes
svm = get_predictor()
if svm != None:
data = scale(np.array([[float(time), float(request.form["MapLat"]), float(request.form["MapLon"])]]))
prediction = svm.predict(data)
probability = round(np.amax(svm.predict_proba(data)) * 100, 2)
db.history.update({ "id" : _id }, {"$push": { "history": {
"timeOfDay": int(time),
"latitude": float(request.form["MapLat"]),
"longitude": float(request.form["MapLon"]),
"address": str(request.form["gmaps-input"]),
"probability": float(probability),
"prediction": str(prediction[0])[2:-1]
}}}, upsert=True);
return render_template('dashboard.html', prediction=str(prediction[0])[2:-1], addr=request.form["gmaps-input"], lat=request.form["MapLat"],
lng=request.form["MapLon"], probability=probability, template_folder=tmpl_dir)
@app.errorhandler(401)
def unauthorized(error):
return render_template('error.html', template_folder=tmpl_dir, error=401, error_msg="Unauthorized",
return_home="You must be logged in to access this page!"
)
@app.errorhandler(500)
def internal_server(e):
return render_template('error.html', template_folder=tmpl_dir, error=500, error_msg="Internal Server Error",
return_home="Something went wrong! Let us know if it happens again!"
)
@app.errorhandler(404)
def page_not_found(e):
return render_template('error.html', template_folder=tmpl_dir, error=404, error_msg="Page Not Found",
return_home="We can't find what you're looking for."
)
def get_predictor():
try:
svm = joblib.load("data/svm.pkl")
except:
crime_data = scale(np.loadtxt(open("data/crimeData.csv", "rb"), delimiter=",", skiprows=1))
crime_target = np.loadtxt(open("data/crimeLabels.csv", "rb"), delimiter=",", dtype=str)
svm = SVC(kernel="rbf", probability=True).fit(crime_data, crime_target)
joblib.dump(svm, "data/svm.pkl")
finally:
return svm
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| mit |
abhipr1/DATA_SCIENCE_INTENSIVE | Machine_Learning/UnSupervised_Learning/Clustering/K_means_cluster.py | 1 | 3027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than 4 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
temp = [data_dict[key]['salary'] for key in data_dict.keys()]
salaries = [x for x in temp if x!='NaN']
print('max salary:',max(salaries))
print('min salary:',min(salaries))
temp = [data_dict[key]['exercised_stock_options'] for key in data_dict.keys()]
stock_options = [x for x in temp if x!='NaN']
print('max stock_options:',max(stock_options))
print('min stock_options:',min(stock_options))
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
feature_3 = "total_payments"
poi = "poi"
features_list = [poi, feature_1, feature_2,feature_3]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### for the "clustering with 3 features" part of the mini-project, the loop
### below has already been changed from the original two-feature version
### (``for f1, f2 in finance_features:``) so that it unpacks three features
### per data point
for f1, f2, _ in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2,feature_3]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### the clustering above creates the predictions of the cluster labels
### for the data and stores them in the array called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print("no predictions object named pred found, no clusters to plot")
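### Illustrative sketch (added for clarity; not part of the original skeleton):
### the mini-project goes on to rescale the features and re-cluster, which is
### why the first figure is saved as "clusters_before_scaling.pdf". Assuming
### sklearn's MinMaxScaler, that step would look roughly like the commented
### lines below.
###
### from sklearn.preprocessing import MinMaxScaler
### scaled = MinMaxScaler().fit_transform(finance_features)
### pred_scaled = KMeans(n_clusters=2).fit_predict(scaled)
### Draw(pred_scaled, scaled, poi, name="clusters_after_scaling.pdf",
###      f1_name=feature_1, f2_name=feature_2)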
| apache-2.0 |
scls19fr/blaze | blaze/expr/collections.py | 6 | 18724 | from __future__ import absolute_import, division, print_function
import datashape
from datashape import DataShape, Option, Record, Unit, dshape, var, Fixed, Var
from datashape.predicates import isscalar, iscollection, isrecord
from toolz import (
isdistinct, frequencies, concat as tconcat, unique, get, first,
)
from odo.utils import copydoc
from .core import common_subexpression
from .expressions import Expr, ElemWise, label, Field
from .expressions import dshape_method_list
from ..compatibility import zip_longest, _strtypes
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'IsIn', 'isin', 'distinct',
'merge', 'head', 'sort', 'Join', 'join', 'transform', 'Concat',
'concat', 'Tail', 'tail']
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
__slots__ = '_hash', '_child', '_key', 'ascending'
@property
def dshape(self):
return self._child.dshape
@property
def key(self):
if self._key is () or self._key is None:
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort a collection
Parameters
----------
key : str, list of str, or Expr
Defines by what you want to sort.
* A single column string: ``t.sort('amount')``
* A list of column strings: ``t.sort(['name', 'amount'])``
* An expression: ``t.sort(-t.amount)``
ascending : bool, optional
Determines order of the sort
"""
if not isrecord(child.dshape.measure):
key = None
if isinstance(key, list):
key = tuple(key)
return Sort(child, key, ascending)
class Distinct(Expr):
""" Remove duplicate elements from an expression
Parameters
----------
on : tuple of :class:`~blaze.expr.expressions.Field`
The subset of fields or names of fields to be distinct on.
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
Use a subset by passing `on`:
>>> import pandas as pd
>>> e = distinct(t, 'name')
>>> data = pd.DataFrame([['Alice', 100, 1],
... ['Alice', 200, 2],
... ['Bob', 100, 1],
... ['Bob', 200, 2]],
... columns=['name', 'amount', 'id'])
>>> compute(e, data)
name amount id
0 Alice 100 1
1 Bob 100 1
"""
__slots__ = '_hash', '_child', 'on'
@property
def dshape(self):
return datashape.var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct({child}{on})'.format(
child=self._child,
on=(', ' if self.on else '') + ', '.join(map(str, self.on))
)
@copydoc(Distinct)
def distinct(expr, *on):
fields = frozenset(expr.fields)
_on = []
append = _on.append
for n in on:
if isinstance(n, Field):
if n._child.isidentical(expr):
n = n._name
else:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
if not isinstance(n, _strtypes):
raise TypeError('on must be a name or field, not: {0}'.format(n))
elif n not in fields:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
append(n)
return Distinct(expr, tuple(_on))
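# Illustrative sketch, not part of the original module: passing a name that is
# not a field of the expression fails fast, e.g.
#
#     t = symbol('t', 'var * {name: string, amount: int, id: int}')
#     distinct(t, 'balance')   # raises ValueError: balance is not a field of t
#
# The exact message follows the format strings above; treat it as an assumption
# rather than a verified output.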
class _HeadOrTail(Expr):
__slots__ = '_hash', '_child', 'n'
@property
def dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.%s(%d)' % (self._child, type(self).__name__.lower(), self.n)
class Head(_HeadOrTail):
""" First `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Tail
"""
pass
@copydoc(Head)
def head(child, n=10):
return Head(child, n)
class Tail(_HeadOrTail):
""" Last `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.tail(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Head
"""
pass
@copydoc(Tail)
def tail(child, n=10):
return Tail(child, n)
def transform(t, replace=True, **kwargs):
""" Add named columns to table
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
"""
if replace and set(t.fields).intersection(set(kwargs)):
t = t[[c for c in t.fields if c not in kwargs]]
args = [t] + [v.label(k) for k, v in sorted(kwargs.items(), key=first)]
return merge(*args)
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
names, values = [], []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
names.extend(schema.names)
values.extend(schema.types)
elif isinstance(schema, Unit):
names.append(c._name)
values.append(schema)
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % c.schema[0])
return dshape(Record(list(zip(names, values))))
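# Illustrative sketch, not part of the original module: given blaze symbols,
# schema_concat merges Record- and Unit-shaped schemas into a single Record,
# e.g.
#
#     t = symbol('t', 'var * {name: string, amount: int32}')
#     schema_concat([t.name, t.amount])  # -> dshape("{name: string, amount: int32}")
#
# The exact repr is an assumption; the point is that Unit columns contribute
# their expression name while Records contribute their own field names.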
class Merge(ElemWise):
""" Merge many fields together
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
"""
__slots__ = '_hash', '_child', 'children'
@property
def schema(self):
return schema_concat(self.children)
@property
def fields(self):
return list(tconcat(child.fields for child in self.children))
def _subterms(self):
yield self
for i in self.children:
for node in i._subterms():
yield node
def _get_field(self, key):
for child in self.children:
if key in child.fields:
if isscalar(child.dshape.measure):
return child
else:
return child[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*[self[c] for c in key])
def _leaves(self):
return list(unique(tconcat(i._leaves() for i in self.children)))
@copydoc(Merge)
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
if exprs:
return exprs[0]
if kwargs:
[(k, v)] = kwargs.items()
return v.label(k)
# Get common sub expression
exprs += tuple(label(v, k) for k, v in sorted(kwargs.items(), key=first))
try:
child = common_subexpression(*exprs)
except Exception:
raise ValueError("No common subexpression found for input expressions")
result = Merge(child, exprs)
if not isdistinct(result.fields):
raise ValueError(
"Repeated columns found: " + ', '.join(
k for k, v in frequencies(result.fields).items() if v > 1
),
)
return result
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs, rhs : Expr
Expressions to join
on_left : str, optional
The fields from the left side to join on.
If no ``on_right`` is passed, then these are the fields for both
sides.
on_right : str, optional
The fields from the right side to join on.
how : {'inner', 'outer', 'left', 'right'}
What type of join to perform.
    suffixes : pair of str
The suffixes to be applied to the left and right sides
in order to resolve duplicate field names.
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = (
'_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes',
)
__inputs__ = 'lhs', 'rhs'
@property
def on_left(self):
if isinstance(self._on_left, tuple):
return list(self._on_left)
else:
return self._on_left
@property
def on_right(self):
if isinstance(self._on_right, tuple):
return list(self._on_right)
else:
return self._on_right
@property
def schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
joined = [[name, dt] for name, dt in self.lhs.schema[0].parameters[0]
if name in self.on_left]
left = [[name, dt] for name, dt in
zip(self.lhs.fields, types_of_fields(
self.lhs.fields, self.lhs))
if name not in self.on_left]
right = [[name, dt] for name, dt in
zip(self.rhs.fields, types_of_fields(
self.rhs.fields, self.rhs))
if name not in self.on_right]
# Handle overlapping but non-joined case, e.g.
left_other = [name for name, dt in left if name not in self.on_left]
right_other = [name for name, dt in right if name not in self.on_right]
overlap = set.intersection(set(left_other), set(right_other))
left_suffix, right_suffix = self.suffixes
left = [[name + left_suffix if name in overlap else name, dt]
for name, dt in left]
right = [[name + right_suffix if name in overlap else name, dt]
for name, dt in right]
if self.how in ('right', 'outer'):
left = [[name, option(dt)] for name, dt in left]
if self.how in ('left', 'outer'):
right = [[name, option(dt)] for name, dt in right]
return dshape(Record(joined + left + right))
@property
def dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields = fields[0]
assert fields == expr._name
return expr.dshape.measure
@copydoc(Join)
def join(lhs, rhs, on_left=None, on_right=None,
how='inner', suffixes=('_left', '_right')):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError("Can not Join. No shared columns between %s and %s" %
(lhs, rhs))
if types_of_fields(on_left, lhs) != types_of_fields(on_right, rhs):
raise TypeError("Schema's of joining columns do not match")
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
class Concat(Expr):
""" Stack tables on common columns
Parameters
----------
lhs, rhs : Expr
Collections to concatenate
axis : int, optional
The axis to concatenate on.
Examples
--------
>>> from blaze import symbol
Vertically stack tables:
>>> names = symbol('names', '5 * {name: string, id: int32}')
>>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
>>> stacked = concat(names, more_names)
>>> stacked.dshape
dshape("12 * {name: string, id: int32}")
Vertically stack matrices:
>>> mat_a = symbol('a', '3 * 5 * int32')
>>> mat_b = symbol('b', '3 * 5 * int32')
>>> vstacked = concat(mat_a, mat_b, axis=0)
>>> vstacked.dshape
dshape("6 * 5 * int32")
Horizontally stack matrices:
>>> hstacked = concat(mat_a, mat_b, axis=1)
>>> hstacked.dshape
dshape("3 * 10 * int32")
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = '_hash', 'lhs', 'rhs', 'axis'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
axis = self.axis
ldshape = self.lhs.dshape
lshape = ldshape.shape
return DataShape(
*(lshape[:axis] + (
_shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
) + lshape[axis + 1:] + (ldshape.measure,))
)
def _shape_add(a, b):
if isinstance(a, Var) or isinstance(b, Var):
return var
return Fixed(a.val + b.val)
@copydoc(Concat)
def concat(lhs, rhs, axis=0):
ldshape = lhs.dshape
rdshape = rhs.dshape
if ldshape.measure != rdshape.measure:
raise TypeError(
'Mismatched measures: {l} != {r}'.format(
l=ldshape.measure, r=rdshape.measure
),
)
lshape = ldshape.shape
rshape = rdshape.shape
for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
if n != axis and a != b:
raise TypeError(
'Shapes are not equal along axis {n}: {a} != {b}'.format(
n=n, a=a, b=b,
),
)
if axis < 0 or 0 < len(lshape) <= axis:
raise ValueError(
"Invalid axis '{a}', must be in range: [0, {n})".format(
a=axis, n=len(lshape)
),
)
return Concat(lhs, rhs, axis)
class IsIn(ElemWise):
"""Check if an expression contains values from a set.
Return a boolean expression indicating whether another expression
contains values that are members of a collection.
Parameters
----------
expr : Expr
Expression whose elements to check for membership in `keys`
keys : Sequence
Elements to test against. Blaze stores this as a ``frozenset``.
Examples
--------
Check if a vector contains any of 1, 2 or 3:
>>> from blaze import symbol
>>> t = symbol('t', '10 * int64')
>>> expr = t.isin([1, 2, 3])
>>> expr.dshape
dshape("10 * bool")
"""
__slots__ = '_hash', '_child', '_keys'
@property
def schema(self):
return datashape.bool_
def __str__(self):
return '%s.%s(%s)' % (self._child, type(self).__name__.lower(),
self._keys)
@copydoc(IsIn)
def isin(expr, keys):
if isinstance(keys, Expr):
raise TypeError('keys argument cannot be an expression, '
'it must be an iterable object such as a list, '
'tuple or set')
return IsIn(expr, frozenset(keys))
dshape_method_list.extend([
(iscollection, set([sort, head, tail])),
(lambda ds: len(ds.shape) == 1, set([distinct])),
(lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin])),
])
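# Illustrative usage sketch, not part of the original module and based only on
# the doctests above: the collection helpers registered here compose into
# ordinary expressions.
#
#     names = symbol('names', 'var * {name: string, id: int}')
#     amounts = symbol('amounts', 'var * {amount: int, id: int}')
#     expr = join(names, amounts, 'id').sort('amount', ascending=False).head(5)
#     expr.dshape  # dshape("5 * {id: int32, name: string, amount: int32}")
#
# The field order in the commented dshape follows Join.schema (joined columns
# first, then the remaining left and right columns); treat the exact repr as an
# assumption rather than a verified output.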
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/utils/tests/test_random.py | 85 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
    # n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
        # Counting the number of combinations is not as good as counting
        # the number of permutations. However, it works with sampling
        # algorithms that do not provide a random permutation of the subset
        # of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
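# Illustrative sketch, not part of the original test module: the helper
# exercised above can also be called directly; random_state=0 is an arbitrary
# choice.
#
#     from sklearn.utils.random import sample_without_replacement
#     idx = sample_without_replacement(n_population=100, n_samples=10,
#                                      random_state=0)
#     # idx is a length-10 array of unique integers drawn from range(100)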
| bsd-3-clause |
costypetrisor/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)  # integer division keeps the subplot index an int on Python 3
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
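# Note (illustrative sketch, not part of the original example): later
# scikit-learn releases replaced the APIs used above -- sklearn.mixture.GMM with
# sklearn.mixture.GaussianMixture and sklearn.cross_validation.StratifiedKFold
# with sklearn.model_selection.StratifiedKFold. Assuming those modules are
# available, a minimal equivalent setup would be:
#
#     from sklearn.mixture import GaussianMixture
#     from sklearn.model_selection import StratifiedKFold
#
#     skf = StratifiedKFold(n_splits=4)
#     train_index, test_index = next(skf.split(iris.data, iris.target))
#     clf = GaussianMixture(n_components=n_classes, covariance_type='full',
#                           max_iter=20)
#     clf.fit(iris.data[train_index])
#
# The supervised mean initialization done by hand above maps to
# GaussianMixture's means_init argument.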
| bsd-3-clause |