repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
MartinThoma/algorithms | perceptron/perceptron.py | 1 | 4057 | #!/usr/bin/env python
"""Example implementation for a perceptron."""
import logging
import math
import sys
import numpy as np
from sklearn.metrics import accuracy_score
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG,
stream=sys.stdout)
class Activation:
"""Containing various activation functions."""
@staticmethod
def sign(netOutput, threshold=0):
return netOutput < threshold
@staticmethod
def sigmoid(netOutput):
return 1 / (1 + math.e**(-1.0 * netOutput))
    @staticmethod
    def tanh(netOutput):
        # Hyperbolic tangent activation.
        return math.tanh(netOutput)
    @staticmethod
    def rectified(netOutput):
        # Rectified linear unit (ReLU): max(0, x).
        return max(0.0, netOutput)
    @staticmethod
    def softmax(netOutput):
        # Softmax over a vector of scores (expects an array-like input);
        # subtracting the max keeps the exponentials numerically stable.
        exps = np.exp(netOutput - np.max(netOutput))
        return exps / np.sum(exps)
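    # Example values: Activation.sigmoid(0.0) == 0.5, and Activation.sign(-0.3)
    # is True with the default threshold of 0 (note the inverted comparison
    # used in sign above).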
class Perceptron:
"""
A perceptron classifier.
Parameters
----------
train : list
valid : list
test : list
learningRate : float
epochs : positive int
Attributes
----------
learningRate : float
epochs : int
trainingSet : list
validationSet : list
testSet : list
weight : list
"""
def __init__(self, train, valid, test, learningRate=0.01, epochs=10):
self.learningRate = learningRate
self.epochs = epochs
self.trainingSet = train
self.validationSet = valid
self.testSet = test
        # Initialize the weight vector with small random values
        # in the interval [0, 0.001)
self.weight = np.random.rand(self.trainingSet['x'].shape[1], 1) / 1000
self.weight = self.weight.astype(np.float32)
def train(self, verbose=True):
"""
Train the perceptron with the perceptron learning algorithm.
Parameters
----------
verbose : bool
Print logging messages with validation accuracy if verbose is True.
"""
for i in range(1, self.epochs + 1):
pred = self.evaluate(self.validationSet['x'])
if verbose:
val_acc = accuracy_score(self.validationSet['y'], pred) * 100
logging.info("Epoch: %i (Validation acc: %0.4f%%)", i, val_acc)
for X, y in zip(self.trainingSet['x'], self.trainingSet['y']):
pred = self.classify(X)
X = np.array([X]).reshape(784, 1)
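                # Perceptron rule: w <- w + learningRate * (y - pred) * x.
                # sign() above uses an inverted comparison and the update below
                # carries an extra factor of (-1); the two inversions cancel,
                # so this is the standard rule applied to -w.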
self.weight += self.learningRate * (y - pred) * X * (-1)
def classify(self, testInstance):
"""
Classify a single instance.
Parameters
----------
testInstance : list of floats
Returns
-------
bool :
            True if the testInstance is recognized as the target digit,
            False otherwise.
"""
return self.fire(testInstance)
def evaluate(self, data=None):
if data is None:
data = self.testSet['x']
return list(map(self.classify, data))
def fire(self, input_):
return Activation.sign(np.dot(np.array(input_, dtype=np.float32),
self.weight))
def main():
"""Run an example."""
# Get data
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original', data_home='.')
x = mnist.data
y = mnist.target
y = np.array([3 == el for el in y], dtype=np.float32)
x = x / 255.0 * 2 - 1 # Scale data to [-1, 1]
x = x.astype(np.float32)
from sklearn.cross_validation import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y,
test_size=0.10,
random_state=42)
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train,
test_size=0.10,
random_state=1337)
p = Perceptron({'x': x_train, 'y': y_train},
{'x': x_valid, 'y': y_valid},
{'x': x_test, 'y': y_test})
p.train(verbose=True)
if __name__ == '__main__':
main()
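# Note: fetch_mldata and sklearn.cross_validation were removed in later
# scikit-learn releases. A rough, untested sketch of the modern equivalents:
#     from sklearn.datasets import fetch_openml
#     from sklearn.model_selection import train_test_split
#     mnist = fetch_openml('mnist_784')
#     x, y = mnist.data, mnist.target.astype(int)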
| mit |
awalls-cx18/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 6 | 6198 | #!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import *
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
    desc='Plot Volk performance results from a SQLite database. ' + \
        'Run one of the volk tests first (e.g., volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
    # Pull the data out for each table into a dictionary
    # so we can reference the table by its name and the data associated
    # with a given kernel in name_reg by its name.
    # This ensures there is no sorting issue with the data in the
    # dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
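            # Convert to percent improvement over the reference table:
            # -100*(y - n)/y == 100*(n - y)/y, where n is the reference time.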
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
                s0.bar(x1, ydata, width=wdth,
                       color=colors[(i-1) % len(colors)], label=t,
                       edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
            if(args.errorbars is False):
                s0.bar(x1, ydata, width=wdth,
                       color=colors[(i-1) % len(colors)], label=t,
                       edgecolor='k', linewidth=2)
            else:
                s0.bar(x1, ydata, width=wdth,
                       yerr=stds,
                       color=colors[(i-1) % len(colors)], label=t,
                       edgecolor='k', linewidth=2,
                       error_kw={"ecolor": 'k', "capsize": 5,
                                 "linewidth": 2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
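# Example invocation, assuming volk_math.py has already populated the
# default database:
#     ./volk_plot.py -D volk_results.db -P mean -E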
| gpl-3.0 |
SnakeJenny/TensorFlow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 8 | 42354 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
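# Helper for the model_fns below: features/labels may arrive either as raw
# tensors or as dicts keyed by 'input'/'labels'; extract() returns the tensor.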
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return const, const, control_flow_ops.group(train_op_1, training_op_2)
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, control_flow_ops.no_op()
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
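# CheckCallsMonitor (below) counts step_begin/step_end callbacks during fit()
# and asserts in end() that both counts equal expect_calls.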
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(est.model_dir + '/export', serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator to copy the array
    # internally if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
arielmakestuff/loadlimit | test/unit/util/test_event.py | 1 | 4403 | # -*- coding: utf-8 -*-
# test/unit/util/test_event.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test Logger"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import json
import logging
# Third-party imports
from pandas import Timestamp
import pytest
# Local imports
from loadlimit.util import Event, EventType, Logger, now
# ============================================================================
# Test __init__
# ============================================================================
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_event_type_badval(val):
"""Raise error if given a bad value for the event_type arg"""
expected = ('event_type arg expected {} object, got {} object instead'.
format(EventType.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(val)
assert err.value.args == (expected, )
@pytest.mark.parametrize('val', list(EventType))
def test_init_event_type_goodval(val):
"""Accept valid value for the event_type arg"""
e = Event(val)
assert e.type == val
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_timestamp_badval(val):
"""Raise error if given a bad value for the timestamp arg"""
expected = ('timestamp arg expected {} object, got {} object instead'.
format(Timestamp.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(EventType.start, val)
assert err.value.args == (expected, )
def test_init_timestamp_noval():
"""Automatically create the current timestamp if arg given None"""
cur = now()
e = Event(EventType.start)
assert e.timestamp.floor('s') == cur.floor('s')
def test_init_timestamp_goodval():
"""Accept valid value for timestamp arg"""
cur = now()
e = Event(EventType.start, cur)
assert e.timestamp == cur
@pytest.mark.parametrize('val', [42, 4.2, '42', [42], (42, )])
def test_init_logger_badbal(val):
"""Raise error if given bad value for the logger arg"""
expected = ('logger arg expected {} object, got {} object instead'.
format(Logger.__name__, type(val).__name__))
with pytest.raises(TypeError) as err:
Event(EventType.start, logger=val)
assert err.value.args == (expected, )
def test_init_logger_noval(caplog):
"""Don't log anything if logger arg is not given a value"""
with caplog.at_level(logging.DEBUG):
Event(EventType.start)
assert len(caplog.records) == 0
def test_init_logger_goodval(caplog):
"""Log an info message if given a logging.Logger object"""
logger = Logger(name=__name__)
e = Event(EventType.start, logger=logger)
expected = dict(name=e.type.name, timestamp=e.timestamp)
assert len(caplog.records) == 1
r = caplog.records[0]
# Check record
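    # The Event is expected to log a line of the form "EVENT: {json payload}";
    # strip the prefix and parse the JSON body to compare with the event fields.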
pre = 'EVENT: '
assert r.levelno == logging.INFO
assert r.message.startswith(pre)
message = json.loads(r.message[len(pre):])
message['timestamp'] = Timestamp(message['timestamp'], tz='UTC')
assert message == expected
# ============================================================================
# Test __getitem__
# ============================================================================
@pytest.mark.parametrize('key', [0, 1])
def test_getitem_goodkey(key):
"""__getitem__() retrieves correct value"""
e = Event(EventType.start, now())
assert e[key] == e._val[key]
def test_getitem_badkey():
"""Raise error when given bad key"""
expected = 'tuple index out of range'
e = Event(EventType.start)
with pytest.raises(IndexError) as err:
e[42]
assert err.value.args == (expected, )
# ============================================================================
# Test __len__
# ============================================================================
def test_len():
"""Return number of items contained in the Event"""
e = Event(EventType.start)
assert len(e) == 2
# ============================================================================
#
# ============================================================================
| mit |
jvpoulos/cs289-hw5 | hw5-code/census_clf.py | 1 | 2480 | import numpy as np
import copy
import cPickle as pickle
import decision_tree as dt
from sklearn.cross_validation import train_test_split
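# Note: sklearn.cross_validation was removed in later scikit-learn releases;
# the same train_test_split function now lives in sklearn.model_selection.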
# Load train data
# train, load from csv without headers
features_train = np.genfromtxt('../census-dataset/census-train-features-median.csv', delimiter=' ', skip_header=1)
# remove index column
features_train = features_train[:,1:]
labels_train = np.genfromtxt('../census-dataset/census-train-labels.csv', delimiter=' ', skip_header=1)
# remove index column
labels_train = labels_train[:,1:][:,0]
# split to obtain train and test set
x_train, x_test, y_train, y_test = train_test_split(features_train, labels_train, test_size=0.33)
# concatenate features and labels
data_train = np.column_stack((x_train, y_train))
data_test = np.column_stack((x_test, y_test))
# build decision tree using entropy
decision_tree = dt.buildtree(data_train, dt.entropy, 0.01)
min_gain_error = {}
# test minimal gain values for pruning
for min_gain_value in np.arange(0,1, 0.01):
dt_temp = copy.copy(decision_tree)
dt.prune(dt_temp, min_gain_value)
# classify test data
y_hat = map(lambda obs : dt.classify(obs, dt_temp), x_test)
y_hat = map(dt.convertToLabel, y_hat)
y_hat = np.array(y_hat)
error = (y_hat != y_test).sum() / float(y_test.shape[0])
min_gain_error[min_gain_value] = error
# prune tree with optimal min_gain value
min_gain_opt = min(min_gain_error.items(), key=lambda kv: kv[1])[0]
dt.prune(decision_tree, min_gain_opt)
# print and draw decision tree
# dt.drawtree(decision_tree,png='census_decision_tree.png')
# dt.printtree(decision_tree)
# classify validation set with pruned tree
y_hat_val = map(lambda obs : dt.classify(obs, decision_tree), x_test)
y_hat_val = map(dt.convertToLabel, y_hat_val)
y_hat_val = np.array(y_hat_val)
# report test set error
error_val = (y_hat_val != y_test).sum() / float(y_test.shape[0])
print error_val
# load test features
features_test = np.genfromtxt('../census-dataset/census-test-features-median.csv', delimiter=' ', skip_header=1)
features_test = features_test[:,1:]
# classify test set with pruned tree
y_hat_test = map(lambda obs : dt.classify(obs, decision_tree), features_test)
y_hat_test = map(dt.convertToLabel, y_hat_test)
y_hat_test = np.array(y_hat_test)
# export labels for kaggle submission
test_ids = np.arange(1,y_hat_test.shape[0]+1, 1)
np.savetxt("census_decision_tree.csv", np.vstack((test_ids, y_hat_test)).T, delimiter=",", fmt='%1.0f', header='Id,Category', comments='')
| mit |
newville/scikit-image | doc/examples/plot_rag.py | 25 | 2139 | """
=======================
Region Adjacency Graphs
=======================
This example demonstrates the use of the `merge_nodes` function of a Region
Adjacency Graph (RAG). The `RAG` class represents an undirected weighted graph
which inherits from the `networkx.Graph` class. When a new node is formed by
merging two nodes, the edge weight of all the edges incident on the resulting
node can be updated by a user-defined function `weight_func`.
The default behaviour is to use the smaller edge weight in case of a conflict.
The example below also shows how to use a custom function to select the larger
weight instead.
"""
from skimage.future.graph import rag
import networkx as nx
from matplotlib import pyplot as plt
import numpy as np
def max_edge(g, src, dst, n):
"""Callback to handle merging nodes by choosing maximum weight.
Returns either the weight between (`src`, `n`) or (`dst`, `n`)
in `g` or the maximum of the two when both exist.
Parameters
----------
g : RAG
The graph under consideration.
src, dst : int
The vertices in `g` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
weight : float
The weight between (`src`, `n`) or (`dst`, `n`) in `g` or the
maximum of the two when both exist.
"""
w1 = g[n].get(src, {'weight': -np.inf})['weight']
w2 = g[n].get(dst, {'weight': -np.inf})['weight']
return max(w1, w2)
def display(g, title):
"""Displays a graph with the given title."""
pos = nx.circular_layout(g)
plt.figure()
plt.title(title)
nx.draw(g, pos)
nx.draw_networkx_edge_labels(g, pos, font_size=20)
g = rag.RAG()
g.add_edge(1, 2, weight=10)
g.add_edge(2, 3, weight=20)
g.add_edge(3, 4, weight=30)
g.add_edge(4, 1, weight=40)
g.add_edge(1, 3, weight=50)
# Assigning dummy labels.
for n in g.nodes():
g.node[n]['labels'] = [n]
gc = g.copy()
display(g, "Original Graph")
g.merge_nodes(1, 3)
display(g, "Merged with default (min)")
gc.merge_nodes(1, 3, weight_func=max_edge, in_place=False)
display(gc, "Merged with max without in_place")
plt.show()
| bsd-3-clause |
mugizico/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 142 | 5990 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
mxjl620/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
cdfassnacht/CodeCDF | python/plot_grade_hist.py | 1 | 2880 | """
A python program to plot a grade histogram
Usage: python plot_grade_hist.py [filename] [colname] [maxy] ([nscorecols])
Required inputs:
filename - text file containing either two columns (name, total) or
three columns (name, score_multiple-choice, score_short-answer)
colname - the name of the column containing the score of interest.
NOTE: for the old-school text files, this will be 'col2'
_unless_ the old-school file also is in 3-column format, in which
case this parameter is ignored and the optional nscorecols
parameter should be set to 2.
If the input file is in CSV format, the colname parameter could
be something like 'Midterm 2 (32620)' or 'MT2' or 'Final Grade'
maxy - maximum value for y axis
Optional input:
nscorecols - number of score columns (1 for 2-column input, 2 for 3-column
input). ONLY set this if the input file is in the old-school
text format AND it is in 3-column format (i.e., with
nscorecols=2). If it is in the old-school text format but is
in the 2-column input, then DO NOT set this keyword, but just
set the colname variable above to 'col2'
"""
import numpy as n
from matplotlib import pyplot as p
import sys
import gradefuncs as gf
if len(sys.argv) < 4:
print('')
print('ERROR: This program requires at least 3 input parameters:')
print(' 1. infile - name of the input file containing scores')
print(' 2. colname - name of column containing the relevant score if the')
print(' input file is in csv format produced by smartsite or canvas or')
print(' if it is in old-school text format with one total-score column')
print(' In the second case (text format with one column of scores) the')
print(' colname parameter should be set to "col2"')
print(' 3. maxy - maximum y value for plot')
print('It may also take an optional fourth parameter, which should ONLY BE')
print(' USED if the file is BOTH in the old-school text format and has')
print(' two columns with scores (one for multiple-choice and one for short')
print(' answer), in which case, this parameter should be used and set to 2.')
print('')
print('Format: python plot_grade_hist.py infile colname maxy')
print(' --- or ---')
print('Format: python plot_grade_hist.py infile colname maxy 2')
print('')
sys.exit()
if len(sys.argv) == 5:
old_3col = True
else:
old_3col = False
infile = sys.argv[1]
colname = sys.argv[2]
maxy = float(sys.argv[3])
if old_3col:
tot = gf.read_text(infile,2)
else:
tot = gf.read_table(infile, colname)
if tot is None:
print('Could not plot histogram')
print('')
sys.exit()
binsize = 3
gf.plot_tothist(infile,tot,maxy,binsize)
| mit |
jcchin/Hyperloop_v2 | src/hyperloop/Python/structural_optimization.py | 4 | 17480 | from __future__ import print_function
import numpy as np
import matplotlib.pylab as plt
from openmdao.api import IndepVarComp, Component, Group, Problem, ExecComp, ScipyOptimizer
class StructuralOptimization(Component):
"""
Notes
-----
Estimates tube tunnel cost and pylon material cost
Optimizes tunnel thickness, pylon radius, and pylon spacing
Many parameters are currently taken from hyperloop alpha, will eventually pull from mission trajectory
Params
------
tube_area : float
Inner tube area. Default value is 3.8013 m**2
rho_tube : float
Density of tube material. Default is 7820 kg/m**3
E_tube : float
Young's modulus of tube material. Default value is 200e9 Pa
v_tube : float
Poisson's ratio of tube material. Default value is .3
Su_tube : float
Ultimate strength of tube material. Default value is 152e6 Pa
sf : float
Tube safety factor. Default value is 1.5
g : float
Gravitational acceleration. Default value is 9.81 m/s**2
unit_cost_tube : float
Cost of tube material per unit mass. Default value is .33 USD/kg
p_tunnel : float
Pressure of air in tube. Default value is 850 Pa. Value will come from vacuum component
p_ambient : float
Pressure of atmosphere. Default value is 101.3e3 Pa.
alpha_tube : float
Coefficient of thermal expansion of tube material. Default value is 0.0
dT_tube : float
Difference in tunnel temperature as compared to a reference temperature. Default value is 0.0
m_pod : float
total mass of pod. Default value is 3100 kg. Value will come from weight component
r : float
Radius of tube. Default value is 1.1 m. Value will come from aero module
t : float
Thickness of the tube. Default value is 50 mm. Value is optimized in problem driver.
rho_pylon : float
Density of pylon material. Default value is 2400 kg/m**3
E_pylon : float
Young's modulus of pylon material. Default value is 41e9 Pa
v_pylon : float
Poisson's ratio of pylon material. Default value is .2
Su_pylon : float
Ultimate strength of pylon material. Default value is 40e6 Pa
unit_cost_pylon : float
Cost of pylon material per unit mass. Default value is .05 USD/kg
h : float
Height of each pylon. Default value is 10 m.
r_pylon : float
Radius of each pylon. Default value is 1.1 m. Value will be optimized in problem driver
vac_weight : float
Total weight of vacuums. Default value is 1500.0 kg. Value will come from vacuum component
Returns
-------
m_pylon : float
Mass of an individual pylon in kg/pylon
m_prime : float
Mass per unit length of the tube in kg/m
von_mises : float
Von Mises stress in the tube in Pa
total_material_cost : float
Total cost of tube and pylon materials per unit distance in USD/m
R : float
Vertical component of the force on each pylon in N
delta : float
Maximum deflection of the tube between pylons in m
dx : float
Distance between pylons in m
t_crit : float
Minimum tube thickness to satisfy the vacuum tube buckling condition in m
Notes
-----
[1] USA. NASA. Buckling of Thin-Walled Circular Cylinders. N.p.: n.p., n.d. Web. 13 June 2016.
"""
def __init__(self):
super(StructuralOptimization, self).__init__()
#Define material properties of tube
self.add_param('rho_tube',
val=7820.0,
units='kg/m**3',
desc='density of steel')
self.add_param('E_tube',
val=200.0 * (10**9),
units='Pa',
desc='Young\'s Modulus of tube')
self.add_param('v_tube', val=.3, desc='Poisson\'s ratio of tube')
self.add_param('Su_tube',
val=152.0e6,
units='Pa',
desc='ultimate strength of tube')
self.add_param('sf', val=1.5, desc='safety factor')
self.add_param('g', val=9.81, units='m/s**2', desc='gravity')
self.add_param('unit_cost_tube',
val=.3307,
units='USD/kg',
desc='cost of tube materials per unit mass')
self.add_param('p_tunnel',
val=100.0,
units='Pa',
desc='Tunnel Pressure')
self.add_param('p_ambient',
val=101300.0,
units='Pa',
desc='Ambient Pressure')
self.add_param('alpha_tube',
val=0.0,
desc='Coefficient of Thermal Expansion of tube')
self.add_param(
'dT_tube', val=0.0,
units='K', desc='Temperature change')
self.add_param('m_pod', val=3100.0, units='kg', desc='mass of pod')
self.add_param('tube_area', val=3.8013, units='m**2', desc='inner tube area')
#self.add_param('r', val=1.1, units='m', desc='inner tube radius')
self.add_param('t', val=.05, units='m', desc='tube thickness')
#self.add_param('dx', val = 500.0, units = 'm', desc = 'distance between pylons')
#Define pylon material properties
self.add_param('rho_pylon',
val=2400.0,
units='kg/m**3',
desc='density of pylon material')
self.add_param('E_pylon',
val=41.0 * (10**9),
units='Pa',
desc='Young\'s Modulus of pylon')
self.add_param('v_pylon', val=.2, desc='Poisson\'s ratio of pylon')
self.add_param('Su_pylon',
val=40.0 * (10**6),
units='Pa',
desc='ultimate strength_pylon')
self.add_param('unit_cost_pylon',
val=.05,
units='USD/kg',
desc='cost of pylon materials per unit mass')
self.add_param('h', val=10.0, units='m', desc='height of pylon')
self.add_param('r_pylon', val=1.1, units='m', desc='pylon radius')
self.add_param('vac_weight', val=1500.0, units='kg', desc='vacuum weight')
#Define outputs
self.add_output('m_pylon',
val=0.0,
units='kg',
desc='total mass of the pylon')
self.add_output('m_prime',
val=100.0,
units='kg/m',
desc='total mass of the tube per unit length')
self.add_output('von_mises',
val=0.0,
units='Pa',
desc='max Von Mises Stress')
self.add_output('total_material_cost',
val=0.0,
units='USD/m',
desc='cost of materials')
self.add_output('R', val=0.0, units='N', desc='Force on pylon')
self.add_output('delta',
val=0.0,
units='m',
desc='max deflection inbetween pylons')
self.add_output('dx',
val=500.0,
units='m',
desc='distance between pylons')
self.add_output('t_crit',
val=0.0,
units='m',
desc='Minimum tunnel thickness for buckling')
def solve_nonlinear(self, params, unknowns, resids):
'''total material cost = ($/kg_tunnel)*m_prime + ($/kg_pylon)*m_pylon*(1/dx)
m_prime = mass of tunnel per unit length = rho_tube*pi*((r+t)^2-r^2)
m_pylon = mass of single pylon = rho_pylon*pi*(r_pylon^2)*h
Constraint equations are derived from the yield and buckling conditions
'''
rho_tube = params['rho_tube']
E_tube = params['E_tube']
v_tube = params['v_tube']
alpha_tube = params['alpha_tube']
dT_tube = params['dT_tube']
unit_cost_tube = params['unit_cost_tube']
g = params['g']
tube_area = params['tube_area']
#r = params['r']
t = params['t']
m_pod = params['m_pod']
p_tunnel = params['p_tunnel']
p_ambient = params['p_ambient']
Su_pylon = params['Su_pylon']
sf = params['sf']
rho_pylon = params['rho_pylon']
E_pylon = params['E_pylon']
r_pylon = params['r_pylon']
unit_cost_pylon = params['unit_cost_pylon']
h = params['h']
vac_weight = params['vac_weight']
#Compute intermediate variable
r = np.sqrt(tube_area/np.pi)
#print(r)
q = rho_tube * np.pi * ((
(r + t)**2) - (r**2)) * g #Calculate distributed load
dp = p_ambient - p_tunnel #Calculate delta pressure
I_tube = (np.pi / 4.0) * ((
(r + t)**4) - (r**4)) #Calculate moment of inertia of tube
m_prime = rho_tube * np.pi * ((
(r + t)**2) - (r**2)) #Calculate mass per unit length
dx = ((2 * (Su_pylon / sf) * np.pi *
(r_pylon**2)) - m_pod * g) / (m_prime * g) #Calculate dx
M = (q * (
(dx**2) / 8.0)) + (m_pod * g * (dx / 2.0)) #Calculate max moment
sig_theta = (dp * r) / t #Calculate hoop stress
sig_axial = ((dp * r) / (2 * t)) + (
(M * r) / I_tube
) + alpha_tube * E_tube * dT_tube #Calculate axial stress
von_mises = np.sqrt((((sig_theta**2) + (sig_axial**2) + (
(sig_axial - sig_theta)**2)) /
2.0)) #Calculate Von Mises stress
m_pylon = rho_pylon * np.pi * (r_pylon**
2) * h #Calculate mass of single pylon
# unknowns['total_material_cost'] = (unit_cost_tube * (rho_tube * np.pi * ((
# (r + t)**2) - (r**2)))) + (unit_cost_pylon * m_pylon * (1 / (
# ((2 * (Su_pylon / sf) * np.pi * (r_pylon**2)) - m_pod * g) /
# (m_prime * g))))
unknowns['total_material_cost'] = (unit_cost_tube * (rho_tube * np.pi * ((
(r + t)**2) - (r**2)))) + (unit_cost_pylon * m_pylon)/dx
unknowns['m_prime'] = m_prime
unknowns['von_mises'] = von_mises
unknowns['delta'] = (5.0 * q * (dx**4)) / (384.0 * E_tube * I_tube)
unknowns['m_pylon'] = m_pylon
unknowns['R'] = .5 * m_prime * dx * g + .5 * m_pod * g
unknowns['dx'] = dx
unknowns['t_crit'] = r * ((
(4.0 * dp * (1.0 - (v_tube**2))) / E_tube)**(1.0 / 3.0))
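# Hedged sketch (not part of the original component): a standalone mirror of
# the pylon-spacing relation used in solve_nonlinear above,
#     dx = (2*(Su_pylon/sf)*pi*r_pylon**2 - m_pod*g) / (m_prime*g),
# i.e. the spacing at which the pylon yield constraint becomes active. The
# function name and default arguments are illustrative only.
def _approx_pylon_spacing(m_prime, m_pod, r_pylon,
                          Su_pylon=40.0e6, sf=1.5, g=9.81):
    """Return the pylon spacing (m) implied by the pylon yield criterion."""
    return ((2.0 * (Su_pylon / sf) * np.pi * (r_pylon**2)) - m_pod * g) / \
        (m_prime * g)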
if __name__ == '__main__':
top = Problem()
root = top.root = Group()
params = (#('r', 1.1, {'units': 'm'}),
('tube_area', 53.134589, {'units': 'm**2'}),
('t', 5.0, {'units': 'm'}),
('r_pylon', 1.1, {'units': 'm'}),
('Su_tube', 152.0e6, {'units': 'Pa'}),
('sf', 1.5),
('p_ambient', 101300.0, {'units': 'Pa'}),
('p_tunnel', 850.0, {'units': 'Pa'}),
('v_tube', .3),
('rho_tube', 7820.0, {'units': 'kg/m**3'}),
('rho_pylon', 2400.0, {'units': 'kg/m**3'}),
('Su_pylon', 40.0e6, {'units': 'Pa'}),
('E_pylon', 41.0e9, {'units': 'Pa'}),
('h', 10.0, {'units': 'm'}),
('m_pod', 3100.0, {'units': 'kg'})
)
root.add('input_vars', IndepVarComp(params))
root.add('p', StructuralOptimization())
root.add('con1', ExecComp(
'c1 = ((Su_tube/sf) - von_mises)')) #Impose yield stress constraint for tube
root.add('con2', ExecComp(
'c2 = t - t_crit')) #Impose buckling constraint for tube dx = ((pi**3)*E_pylon*(r_pylon**4))/(8*(h**2)*rho_tube*pi*(((r+t)**2)-(r**2))*g)
#root.connect('input_vars.r', 'p.r')
root.connect('input_vars.tube_area', 'p.tube_area')
root.connect('input_vars.t', 'p.t')
root.connect('input_vars.r_pylon', 'p.r_pylon')
root.connect('input_vars.Su_tube', 'con1.Su_tube')
root.connect('input_vars.sf', 'con1.sf')
root.connect('p.von_mises', 'con1.von_mises')
root.connect('input_vars.t', 'con2.t')
root.connect('p.t_crit', 'con2.t_crit')
root.p.deriv_options['type'] = "cs"
# root.p.deriv_options['form'] = 'forward'
root.p.deriv_options['step_size'] = 1.0e-10
top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'
top.driver.add_desvar('input_vars.t', lower=.001, scaler=100.0)
top.driver.add_desvar('input_vars.r_pylon', lower=.1)
top.driver.add_objective('p.total_material_cost', scaler = 1.0e-4)
top.driver.add_constraint('con1.c1', lower=0.0, scaler=1000.0)
top.driver.add_constraint('con2.c2', lower=0.0)
top.setup()
top['p.p_tunnel'] = 850.0
# top['p.m_pod']= 10000.0
top['p.h'] = 10.0
import csv
# f = open('/Users/kennethdecker/Desktop/Paper figures/land_structural_trades.csv', 'wt')
# writer = csv.writer(f)
# writer.writerow(('A_tube', 'm=10000', 'm=15000', 'm=20000', 'cost'))
m_pod = np.linspace(10000.0, 20000, num = 3)
A_tube = np.linspace(20.0, 50.0, num = 30)
dx = np.zeros((len(m_pod), len(A_tube)))
t_tube = np.zeros((len(m_pod), len(A_tube)))
r_pylon = np.zeros((len(m_pod), len(A_tube)))
cost = np.zeros((1, len(A_tube)))
for i in range(len(A_tube)):
for j in range(len(m_pod)):
top['input_vars.tube_area'] = A_tube[i]
top['p.m_pod'] = m_pod[j]
top.run()
dx[j,i] = top['p.dx']
t_tube[j,i] = top['p.t']
r_pylon[j,i] = top['p.r_pylon']
cost[0,i] = top['p.total_material_cost']
# writer.writerow((A_tube[i], dx[0,i], dx[1,i], dx[2,i], cost[0,i]))
# f.close()
plt.hold(True)
# plt.subplot(211)
line1, = plt.plot(A_tube, dx[0,:], 'b-', linewidth = 2.0, label = 'pod mass = 10000 kg')
line2, = plt.plot(A_tube, dx[1,:], 'r-', linewidth = 2.0, label = 'pod mass = 15000 kg')
line3, = plt.plot(A_tube, dx[2,:], 'g-', linewidth = 2.0, label = 'pod mass = 20000 kg')
plt.xlabel('Tube Area (m^2)', fontsize = 12, fontweight = 'bold')
plt.ylabel('Pylon Spacing (m)', fontsize = 12, fontweight = 'bold')
plt.grid('on')
plt.legend(handles = [line1, line2, line3], loc = 1)
plt.show()
plt.subplot(211)
line1, = plt.plot(A_tube, t_tube[0,:], 'b-', linewidth = 2.0, label = 'm_pod = 10000 kg')
line2, = plt.plot(A_tube, t_tube[1,:], 'r-', linewidth = 2.0, label = 'm_pod = 15000 kg')
line3, = plt.plot(A_tube, t_tube[2,:], 'g-', linewidth = 2.0, label = 'm_pod = 20000 kg')
# plt.xlabel('Tube Area (m^2)', fontsize = 12, fontweight = 'bold')
plt.ylabel('tube thickness (m)', fontsize = 12, fontweight = 'bold')
plt.grid('on')
plt.legend(handles = [line1, line2, line3], loc = 1)
plt.subplot(212)
line1, = plt.plot(A_tube, r_pylon[0,:], 'b-', linewidth = 2.0, label = 'm_pod = 10000 kg')
line2, = plt.plot(A_tube, r_pylon[1,:], 'r-', linewidth = 2.0, label = 'm_pod = 15000 kg')
line3, = plt.plot(A_tube, r_pylon[2,:], 'g-', linewidth = 2.0, label = 'm_pod = 20000 kg')
plt.xlabel('Tube Area (m^2)', fontsize = 12, fontweight = 'bold')
plt.ylabel('Pylon Radius (m)', fontsize = 12, fontweight = 'bold')
plt.grid('on')
plt.show()
plt.plot(A_tube, cost[0,:], 'r-', linewidth = 2.0)
plt.xlabel('Tube Area (m^2)', fontsize = 12, fontweight = 'bold')
plt.ylabel('Material Cost per Meter (USD/m)', fontsize = 12, fontweight = 'bold')
plt.show()
# plt.plot(A_tube, dx[0,:])
# plt.xlabel('Tube Area')
# plt.ylabel('pylon spacing')
# plt.show()
# plt.plot(A_tube, total_material_cost[0,:])
# plt.xlabel('Tube Area')
# plt.ylabel('Cost per unit length')
# plt.show()
R_buckle = ((np.pi**3) * top['p.E_tube'] *
(top['p.r_pylon']**4)) / (16 * (top['p.h']**2))
print('Optimizer pylon radius %f' % top['p.r_pylon'])
if top['p.R'] < R_buckle:
print('Pylon buckling constraint is satisfied')
else:
r_pylon_new = ((R_buckle * 16 * (top['p.h']**2)) / (
(np.pi**3) * top['p.E_tube']))**.25
print(
'Optimizer value did not satisfy pylon buckling condition. Pylon radius set to minimum buckling value')
print('new pylon radius is %f m' % r_pylon_new)
print('\n')
print('total material cost per m is $%6.2f/km' %
(top['p.total_material_cost'] * (1.0e3)))
print('pylon radius is %6.3f m' % top['p.r_pylon'])
print('tube thickness is %6.4f mm' % (top['p.t'] * (1.0e3)))
print('mass per unit length is %6.2f kg/m' % top['p.m_prime'])
print('vertical force on each pylon is %6.2f kN' % (top['p.R'] / (1.0e3)))
print('Von Mises stress is %6.3f MPa' % (top['p.von_mises'] / (1.0e6)))
print('distance between pylons is %6.2f m' % top['p.dx'])
print('max deflection is %6.4f mm' % (top['p.delta'] * (1.0e3)))
print('\n')
print('con1 = %f' % top['con1.c1'])
print('con2 = %f' % top['con2.c2'])
if top['con1.c1'] < 0.0:
print('con1 not satisfied')
elif top['con2.c2'] < 0.0:
print('con2 not satisfied')
else:
print('Yield constraints are satisfied')
| apache-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_kernel_config.py | 24 | 15358 | # Configuration file for ipython-kernel.
c = get_config()
#------------------------------------------------------------------------------
# IPKernelApp configuration
#------------------------------------------------------------------------------
# IPython: an enhanced interactive Python shell.
# IPKernelApp will inherit config from: BaseIPythonApplication, Application,
# InteractiveShellApp, ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.IPKernelApp.hide_initial_ns = True
# The importstring for the DisplayHook factory
# c.IPKernelApp.displayhook_class = 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'
# A list of dotted module names of IPython extensions to load.
# c.IPKernelApp.extensions = []
# Execute the given command string.
# c.IPKernelApp.code_to_run = ''
# redirect stderr to the null device
# c.IPKernelApp.no_stderr = False
# The date format used by logging formatters for %(asctime)s
# c.IPKernelApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to create profile dir if it doesn't exist
# c.IPKernelApp.auto_create = False
# Reraise exceptions encountered loading IPython extensions?
# c.IPKernelApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.IPKernelApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.IPKernelApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.IPKernelApp.pylab = None
# Run the module as a script.
# c.IPKernelApp.module_to_run = ''
# The importstring for the OutStream factory
# c.IPKernelApp.outstream_class = 'IPython.kernel.zmq.iostream.OutStream'
# dotted module name of an IPython extension to load.
# c.IPKernelApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.IPKernelApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.IPKernelApp.overwrite = False
# The IPython profile to use.
# c.IPKernelApp.profile = 'default'
# List of files to run at IPython startup.
# c.IPKernelApp.exec_files = []
# The Logging format template
# c.IPKernelApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.IPKernelApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.IPKernelApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.IPKernelApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.IPKernelApp.exec_lines = []
# set the control (ROUTER) port [default: random]
# c.IPKernelApp.control_port = 0
# set the heartbeat port [default: random]
# c.IPKernelApp.hb_port = 0
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.IPKernelApp.gui = None
# A file to be run
# c.IPKernelApp.file_to_run = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.IPKernelApp.ipython_dir = ''
# kill this process if its parent dies. On Windows, the argument specifies the
# HANDLE of the parent process, otherwise it is simply boolean.
# c.IPKernelApp.parent_handle = 0
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.IPKernelApp.matplotlib = None
# set the iopub (PUB) port [default: random]
# c.IPKernelApp.iopub_port = 0
# redirect stdout to the null device
# c.IPKernelApp.no_stdout = False
#
# c.IPKernelApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.IPKernelApp.connection_file = ''
# The Kernel subclass to be used.
#
# This should allow easy re-use of the IPKernelApp entry point to configure and
# launch kernels other than IPython's own.
# c.IPKernelApp.kernel_class = <class 'IPython.kernel.zmq.ipkernel.IPythonKernel'>
# ONLY USED ON WINDOWS Interrupt this process when the parent is signaled.
# c.IPKernelApp.interrupt = 0
# set the shell (ROUTER) port [default: random]
# c.IPKernelApp.shell_port = 0
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.IPKernelApp.pylab_import_all = True
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.IPKernelApp.ip = ''
#------------------------------------------------------------------------------
# IPythonKernel configuration
#------------------------------------------------------------------------------
# IPythonKernel will inherit config from: Kernel
#
# c.IPythonKernel._execute_sleep = 0.0005
# Whether to use appnope for compatibility with OS X App Nap.
#
# Only affects OS X >= 10.9.
# c.IPythonKernel._darwin_app_nap = True
#
# c.IPythonKernel._poll_interval = 0.05
#------------------------------------------------------------------------------
# ZMQInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of InteractiveShell for ZMQ.
# ZMQInteractiveShell will inherit config from: InteractiveShell
#
# c.ZMQInteractiveShell.object_info_string_level = 0
#
# c.ZMQInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQInteractiveShell.pdb = False
#
# c.ZMQInteractiveShell.ipython_dir = ''
#
# c.ZMQInteractiveShell.history_length = 10000
#
# c.ZMQInteractiveShell.readline_remove_delims = '-/~'
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQInteractiveShell.display_page = False
# Deprecated, use PromptManager.in2_template
# c.ZMQInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.ZMQInteractiveShell.separate_in = '\n'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQInteractiveShell.cache_size = 1000
#
# c.ZMQInteractiveShell.wildcards_case_sensitive = True
# The name of the logfile to use.
# c.ZMQInteractiveShell.logfile = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQInteractiveShell.ast_node_interactivity = 'last_expr'
#
# c.ZMQInteractiveShell.debug = False
#
# c.ZMQInteractiveShell.quiet = False
# Save multi-line entries as one entry in readline history
# c.ZMQInteractiveShell.multiline_history = True
# Deprecated, use PromptManager.in_template
# c.ZMQInteractiveShell.prompt_in1 = 'In [\\#]: '
# Enable magic commands to be called without the leading %.
# c.ZMQInteractiveShell.automagic = True
# The part of the banner to be printed before the profile
# c.ZMQInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQInteractiveShell.autocall = 0
#
# c.ZMQInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQInteractiveShell.colors = 'LightBG'
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQInteractiveShell.color_info = True
# Show rewritten input, e.g. for autocall.
# c.ZMQInteractiveShell.show_rewritten_input = True
#
# c.ZMQInteractiveShell.xmode = 'Context'
#
# c.ZMQInteractiveShell.separate_out2 = ''
# The part of the banner to be printed after the profile
# c.ZMQInteractiveShell.banner2 = ''
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQInteractiveShell.logappend = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQInteractiveShell.prompt_out = 'Out[\\#]: '
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.justify
# c.ZMQInteractiveShell.prompts_pad_left = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQInteractiveShell.ast_transformers = []
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
BonexGu/Blik2D-SDK | Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/factorization/python/ops/gmm.py | 47 | 5877 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
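# Hedged usage sketch (not part of the original module), written against the
# deprecated tf.contrib.learn conventions used above: fit the GMM estimator on
# an in-memory NumPy array and read back cluster assignments. The helper name
# and the `points` argument are illustrative only.
def _example_gmm_usage(points, num_clusters=3, steps=10):
  def _input_fn():
    # GMM expects features only; labels must be None (see _model_fn above).
    return constant_op.constant(points.astype('float32')), None

  gmm = GMM(num_clusters=num_clusters, covariance_type='full', random_seed=42)
  gmm.fit(input_fn=_input_fn, steps=steps)
  assignments = list(gmm.predict_assignments(input_fn=_input_fn))
  return np.array(assignments), gmm.clusters()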
| mit |
gautam1858/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | 14 | 62926 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
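# Hedged illustration (not part of the original module): when x/y are given,
# _get_input_fn wraps them in a data feeder and hands back (input_fn, feed_fn);
# when input_fn is given it is passed through untouched. The array shapes below
# are made up.
def _demo_get_input_fn():
  x = np.random.rand(8, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=8)
  input_fn, feed_fn = _get_input_fn(
      x=x, y=y, input_fn=None, feed_fn=None, batch_size=4)
  return input_fn, feed_fn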
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be an
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
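# Hedged illustration (not part of the original module): _model_fn_args strips
# arguments already bound by functools.partial, which is how Estimator decides
# whether a user-supplied model_fn still expects `params`, `config`, etc. The
# demo function below is illustrative only and is never called.
def _demo_model_fn_args():
  import functools

  def model_fn(features, labels, mode, params):
    return features, labels, mode, params

  bound = functools.partial(model_fn, params={'learning_rate': 0.1})
  # Expected result: ('features', 'labels', 'mode')
  return _model_fn_args(bound)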
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2', 'VarHandleOp'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly names given in `metrics` to the result of
calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError('Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics,
predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError('Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics,
labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
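# Hedged example (not part of the original module): the MetricSpec form that
# _make_metrics_ops recommends above. The metric name and prediction key are
# illustrative; metrics_lib.accuracy is the core tf.metrics accuracy op.
def _example_metric_spec_dict():
  return {
      'accuracy': metric_spec.MetricSpec(
          metric_fn=metrics_lib.accuracy, prediction_key='classes'),
  }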
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
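# Hedged usage sketch (names and paths below are illustrative only): given an
# eval-result dict, the helper above writes one scalar summary per numeric
# entry and skips both None values and the 'global_step' key itself:
#   _write_dict_to_summary(output_dir='/tmp/eval',
#                          dictionary={'loss': 0.5, 'accuracy': 0.9,
#                                      'global_step': 10},
#                          current_global_step=10)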
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
' with an Estimator from tf.estimator.*')
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
'model_dir are set both in constructor and RunConfig, but with '
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with the following signature:
`def model_fn(features, labels, mode, metrics)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config=config)
return public_model_fn
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
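  # Hedged usage sketch -- `my_input_fn` below is a hypothetical function that
  # follows the (features, labels) convention documented for `input_fn`:
  #   def my_input_fn():
  #     features = {'x': tf.constant([[1.], [2.]])}
  #     labels = tf.constant([[0.], [1.]])
  #     return features, labels
  #   estimator.fit(input_fn=my_input_fn, steps=100)
  # Passing both `steps` and `max_steps` raises ValueError, as enforced above.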
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def partial_fit(self,
x=None,
y=None,
input_fn=None,
steps=1,
batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can be used to
    implement either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at the same time, or when the model is taking a long time
    to converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(
x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=monitors)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
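  # Hedged sketch: evaluation reuses the same input_fn contract as fit(); the
  # returned dict carries 'global_step' next to the metric values.
  # `my_eval_input_fn` is a hypothetical placeholder:
  #   metrics = estimator.evaluate(input_fn=my_eval_input_fn, steps=1)
  #   print(metrics['global_step'], metrics.get('loss'))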
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('batch_size', None), ('as_iterable', True))
def predict(self,
x=None,
input_fn=None,
batch_size=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
iterate_batches: If True, yield the whole batch at once instead of
decomposing the batch into individual samples. Only relevant when
as_iterable is True.
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable,
iterate_batches=iterate_batches)
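  # Hedged sketch of the as_iterable contract documented above
  # (`my_predict_input_fn` and `handle` are placeholders):
  #   for pred in estimator.predict(input_fn=my_predict_input_fn):
  #     handle(pred)   # one prediction per input example
  # With as_iterable=False (and `x` given) a single numpy array, or a dict of
  # arrays, is returned instead.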
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(
self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.', str(labels),
str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval'
if not name else 'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any(
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
)
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
                 prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
                 is passed to Estimator in `params` parameter. This allows
                 Estimators to be configured through hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
Supports next three signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' % (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
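  # Hedged sketch of a model_fn matching the legacy tuple signature listed in
  # the docstring above; every name below is illustrative, not part of this
  # module:
  #   def my_model_fn(features, labels, mode, params):
  #     predictions = tf.layers.dense(features['x'], 1)
  #     loss = tf.losses.mean_squared_error(labels, predictions)
  #     train_op = tf.train.GradientDescentOptimizer(
  #         params['learning_rate']).minimize(
  #             loss, global_step=tf.train.get_global_step())
  #     return predictions, loss, train_op
  #   estimator = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.1})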
def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
config: RunConfig.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
if config:
kwargs['config'] = config
else:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(
_make_metrics_ops(metrics, features, labels,
model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(self,
export_dir_base,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec(
(tag_constants.SERVING,), ()),),
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
# pylint: enable=line-too-long
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(variables.local_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
builder.add_meta_graph_and_variables(
session,
untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
main_op=init_op,
strip_default_attrs=strip_default_attrs)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [
tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()
]
output_names = [
tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()
]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please switch to the Estimator interface.')
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x,
None,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate([output[key] for output in results], axis=0)
for key in results[0]
}
| apache-2.0 |
DamCB/tyssue | tyssue/utils/utils.py | 2 | 12124 | import warnings
import numpy as np
import logging
import pandas as pd
logger = logging.getLogger(name=__name__)
def _to_2d(df):
df_2d = to_nd(df, 2)
return df_2d
def _to_3d(df):
df_3d = to_nd(df, 3)
return df_3d
def to_nd(df, ndim):
"""
    Give a new shape to the input data by duplicating its column ndim times.
    Parameters
    ----------
    df : input data that will be reshaped
    ndim : number of columns of the reshaped data.
    Returns
    -------
    df_nd : array of shape (df.size, ndim), with the values of df repeated
        along the columns.
    """
    df_nd = np.repeat(np.asarray(df).reshape((df.size, 1)), ndim, axis=1)
return df_nd
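# Illustrative sketch (editor's note): to_nd broadcasts a per-element series
# onto ndim identical columns, e.g.
#   to_nd(pd.Series([1., 2.]), 3)
#   # -> array([[1., 1., 1.],
#   #           [2., 2., 2.]])
# which can then multiply an (N, 3) array of positions element-wise.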
def combine_specs(*specs):
combined = {}
for spec in specs:
for key in spec:
if key in combined:
combined[key].update(spec[key])
else:
combined[key] = spec[key]
return combined
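# Hedged example of the merge behaviour implemented above: later specs update
# earlier ones key by key (column names are illustrative):
#   combine_specs({'edge': {'line_tension': 1.0}},
#                 {'edge': {'length': 0.0}, 'face': {'area': 1.0}})
#   # -> {'edge': {'line_tension': 1.0, 'length': 0.0}, 'face': {'area': 1.0}}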
def spec_updater(specs, new):
"""
    Add the elements of the new dictionary to the specs dictionary.
    Update the values if the key already exists.
Parameters
----------
specs: specification that will be modified
new: dictionary of new specification
"""
for key in new.keys():
if specs.get(key) is not None:
specs[key].update(new[key])
else:
specs[key] = new[key]
def set_data_columns(datasets, specs, reset=False):
"""Sets the columns of the dataframes in the datasets dictionnary to
the uniform values in the specs sub-dictionnaries.
Parameters
----------
datasets : dict of dataframes
specs : dict of dicts
reset : bool, default False
        For each key in specs, the value is a dictionary whose
        keys are column names for the corresponding dataframe in
        datasets. If there is no such column in the dataframe,
        it is created. If the column already exists and reset is `True`,
the new value is used.
"""
for name, spec in specs.items():
if not len(spec):
continue
if "setting" in name:
continue
df = datasets.get(name)
if df is None:
warnings.warn(
f"There is no {name} dataset, so the {name}" " spec have no effect."
)
continue
for col, default in spec.items():
if col in df.columns and reset:
logger.warning(
"Reseting column %s of the %s" " dataset with new specs", col, name
)
if col not in df.columns or reset:
df[col] = default
def data_at_opposite(sheet, edge_data, free_value=None):
"""
Returns a pd.DataFrame with the values of the input edge_data
    at the opposite edges. For free edges, optionally replaces NaN values
with free_value
Parameters
----------
sheet: a :class:`Sheet` instance
edge_data: dataframe contain value of edge
Returns
-------
opposite: pandas series contain value of opposite edge
"""
if isinstance(edge_data, pd.Series):
opposite = pd.Series(
edge_data.reindex(sheet.edge_df["opposite"]).to_numpy(),
index=edge_data.index,
)
elif isinstance(edge_data, pd.DataFrame):
opposite = pd.DataFrame(
edge_data.reindex(sheet.edge_df["opposite"]).to_numpy(),
index=edge_data.index,
columns=edge_data.columns,
)
else:
opposite = pd.DataFrame(
np.asarray(edge_data).take(sheet.edge_df["opposite"].to_numpy(), axis=0),
index=sheet.edge_df.index,
)
if free_value is not None:
opposite = opposite.replace(np.nan, free_value)
return opposite
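# Hedged usage sketch: compare a quantity across half-edges, treating free
# (border) edges as zero. 'line_tension' is an illustrative column name and
# the sheet is assumed to have an up-to-date 'opposite' edge column:
#   tension = sheet.edge_df['line_tension']
#   opposite_tension = data_at_opposite(sheet, tension, free_value=0.0)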
def get_sub_eptm(eptm, edges, copy=False):
"""
Define sub-epithelium corresponding to the edges.
Parameters
----------
eptm: a :class:`Epithelium` instance
    edges: list of edges included in the sub-epithelium
Returns
-------
sub_eptm: a :class:`Epithelium` instance
"""
from ..core.objects import Epithelium
datasets = {}
edge_df = eptm.edge_df.loc[edges]
if edge_df.empty:
warnings.warn("Sub epithelium appears to be empty")
return None
datasets["edge"] = edge_df
datasets["vert"] = eptm.vert_df.loc[set(edge_df["srce"])]
datasets["face"] = eptm.face_df.loc[set(edge_df["face"])]
if "cell" in eptm.datasets:
datasets["cell"] = eptm.cell_df.loc[set(edge_df["cell"])]
if copy:
for elem, df in datasets.items():
datasets[elem] = df.copy()
sub_eptm = Epithelium("sub", datasets, eptm.specs)
sub_eptm.datasets["edge"]["edge_o"] = edges
sub_eptm.datasets["edge"]["srce_o"] = edge_df["srce"]
sub_eptm.datasets["edge"]["trgt_o"] = edge_df["trgt"]
sub_eptm.datasets["edge"]["face_o"] = edge_df["face"]
if "cell" in eptm.datasets:
sub_eptm.datasets["edge"]["cell_o"] = edge_df["cell"]
sub_eptm.datasets["vert"]["srce_o"] = set(edge_df["srce"])
sub_eptm.datasets["face"]["face_o"] = set(edge_df["face"])
if "cell" in eptm.datasets:
sub_eptm.datasets["cell"]["cell_o"] = set(edge_df["cell"])
sub_eptm.reset_index()
sub_eptm.reset_topo()
return sub_eptm
def single_cell(eptm, cell, copy=False):
"""
    Define an epithelium instance restricted to a single cell.
Parameters
----------
eptm : a :class:`Epithelium` instance
cell : identifier of a cell
copy : bool, default `False`
Returns
-------
sub_etpm: class:'Epithelium' instance corresponding to the cell
"""
edges = eptm.edge_df[eptm.edge_df["cell"] == cell].index
return get_sub_eptm(eptm, edges, copy)
def scaled_unscaled(func, scale, eptm, geom, args=(), kwargs={}, coords=None):
"""Scales the epithelium by an homotetic factor `scale`, applies
the function `func`, and scales back to original size.
Parameters
----------
func: the function to apply to the scaled epithelium
scale: float, the scale to apply
eptm: a :class:`Epithelium` instance
geom: a :class:`Geometry` class
args: sequence, the arguments to pass to func
kwargs: dictionary, the keywords arguments
to pass to func
coords: the coordinates on which the scaling applies
If the execution of function fails, the scaling is still reverted
Returns
-------
res: the result of the function func
"""
if coords is None:
coords = eptm.coords
geom.scale(eptm, scale, coords)
geom.update_all(eptm)
try:
res = func(*args, **kwargs)
except:
raise
finally:
geom.scale(eptm, 1 / scale, coords)
geom.update_all(eptm)
return res
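# Hedged sketch: evaluate a function on a temporarily rescaled tissue.  The
# `solver` object and its `find_energy_min` method are hypothetical here:
#   res = scaled_unscaled(solver.find_energy_min, 2.0, sheet, geom,
#                         args=(sheet, geom, model))
# Whatever `func` does, the geometry is scaled back by 1/scale afterwards,
# even if `func` raises (see the try/finally block above).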
def modify_segments(eptm, modifiers):
"""Modifies the datasets of a segmented epithelium
according to the passed modifiers.
Parameters
----------
eptm : :class:`tyssue.Epithelium`
    modifiers : nested dictionary
Note
----
This functions assumes that the epithelium has a `segment_index`
method as implemented in the :class:`tyssue.Monolayer`.
Example
-------
>>> modifiers = {
>>> 'apical' : {
>>> 'edge': {'line_tension': 1.},
>>> 'face': {'prefered_area': 0.2},
>>> },
>>> 'basal' : {
>>> 'edge': {'line_tension': 3.},
>>> 'face': {'prefered_area': 0.1},
    >>>     }
    >>> }
    >>> modify_segments(monolayer, modifiers)
    >>> monolayer.edge_df.loc[monolayer.apical_edges,
>>> 'line_tension'].unique()[0] == 1.
True
"""
for segment, spec in modifiers.items():
for element, parameters in spec.items():
idx = eptm.segment_index(segment, element)
for param_name, param_value in parameters.items():
eptm.datasets[element].loc[idx, param_name] = param_value
def _compute_ar(df, coords):
u, v = coords
major = np.ptp(df[u].values)
minor = np.ptp(df[v].values)
if major < minor:
minor, major = major, minor
return 0 if minor == 0 else major / minor
def ar_calculation(sheet, coords=["x", "y"]):
"""Calculates the aspect ratio of each face of the sheet
Parameters
----------
eptm : a :class:`Sheet` object
coords : list of str, optional, default ['x', 'y']
the coordinates on which to compute the aspect ratio
Returns
-------
AR: pandas series of aspect ratio for all faces.
Note
----
As is the case in ImageJ, the returned aspect ratio is always higher than 1
"""
srce_pos = sheet.upcast_srce(sheet.vert_df[sheet.coords])
srce_pos["face"] = sheet.edge_df["face"]
return srce_pos.groupby("face").apply(_compute_ar, coords)
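# Illustrative sketch: aspect ratios in the (x, z) plane instead of the
# default (x, y); the threshold below is arbitrary:
#   ar_xz = ar_calculation(sheet, coords=["x", "z"])
#   elongated = ar_xz[ar_xz > 3]   # faces at least 3 times longer than wide
# The bounding-box estimate above is crude but, as in ImageJ, never below 1.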
def get_next(eptm):
"""
Returns the indices of the next edge for each edge
"""
fs_indexed = (
eptm.edge_df[["face", "srce"]]
.reset_index()
.set_index(["face", "srce"], drop=False)
)
ft_index = pd.MultiIndex.from_frame(
eptm.edge_df[["face", "trgt"]], names=["face", "srce"]
)
next_ = fs_indexed.loc[ft_index, "edge"].values
return next_
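# Hedged sketch: get_next returns, for every half-edge, the index of the edge
# of the same face that starts at this edge's target vertex, which is handy
# for walking around a face polygon:
#   sheet.edge_df["next"] = get_next(sheet)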
## small utlity to swap apical and basal segments
def swap_apico_basal(organo):
"""Swap apical and basal segments of an organoid."""
for elem in ["vert", "face", "edge"]:
swaped = organo.datasets[elem]["segment"].copy()
swaped.loc[organo.segment_index("apical", elem)] = "basal"
swaped.loc[organo.segment_index("basal", elem)] = "apical"
organo.datasets[elem]["segment"] = swaped
def elem_centered_patch(eptm, elem_idx, neighbour_order, elem):
"""
    Return a sub-epithelium centered on the element (cell or face) with index
    elem_idx, with neighbour_order levels of neighbours around it.
Parameters
----------
eptm : a :class:`Epithelim` instance
index : int, id of the center element
neighbour_order: int,
neighbourhood 'degree' around the center element
Returns
-------
patch: an object with the same class as eptm
"""
if elem not in ("face", "cell"):
raise ValueError
elems = pd.Series(elem_idx).append(
eptm.get_neighborhood(elem_idx, neighbour_order, elem)[elem]
)
edges = eptm.edge_df[eptm.edge_df[elem].isin(elems)].copy()
vertices = eptm.vert_df.loc[set(edges["srce"])].copy()
if elem == "cell":
faces = eptm.face_df.loc[set(edges["face"])].copy()
cells = eptm.cell_df.loc[elems].copy()
elif "cell" in edges.columns:
faces = eptm.face_df.loc[elems].copy()
cells = eptm.cell_df.loc[set(edges["cell"])].copy()
else:
faces = eptm.face_df.loc[elems].copy()
cells = None
pos = (
vertices[eptm.coords].values
- vertices[eptm.coords].mean(axis=0).values[None, :]
)
u, v, rotation = np.linalg.svd(pos, full_matrices=False)
vertices[eptm.coords] = np.dot(pos, rotation.T)
patch_dset = {"vert": vertices, "face": faces, "edge": edges}
if cells is not None:
patch_dset["cell"] = cells
patch = eptm.__class__("patch", patch_dset, eptm.specs)
patch.reset_index()
return patch
def face_centered_patch(sheet, face, neighbour_order):
"""
    Return a sub-sheet centered on the face, with neighbour_order levels of
    neighbours around it.
Parameters
----------
sheet : a :class:`Sheet` object
face : int, id of the center face
    neighbour_order: int, number of neighbours around the center face
Returns
-------
patch: an object of the same class as the input object
"""
return elem_centered_patch(sheet, face, neighbour_order, "face")
def cell_centered_patch(eptm, cell, neighbour_order):
"""
    Return a sub-epithelium centered on the cell, with neighbour_order levels
    of neighbours around it.
Parameters
----------
eptm : a :class:`Epithelium` instance
    cell : int, id of the center cell
    neighbour_order: int, number of neighbours around the center cell
Returns
-------
patch: an object of the same class as the input object
"""
return elem_centered_patch(eptm, cell, neighbour_order, "cell")
| gpl-3.0 |
stscieisenhamer/glue | glue/plugins/tools/spectrum_tool/qt/profile_viewer.py | 4 | 13923 | from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib.transforms import blended_transform_factory
from glue.core.callback_property import CallbackProperty, add_callback
PICK_THRESH = 30 # pixel distance threshold for picking
class Grip(object):
def __init__(self, viewer, artist=True):
self.viewer = viewer
self.enabled = True
self.artist = None
if artist:
self.artist = self._artist_factory()
def remove(self):
raise NotImplementedError()
def _artist_factory(self):
raise NotImplementedError()
def pick_dist(self, x, y):
"""
Return the distance, in pixels,
between a point in (x,y) data space and
the grip
"""
raise NotImplementedError()
def dblclick(self, x, y):
"""Respond to a double-click event
Default is to ignore
"""
pass
def select(self, x, y):
"""
Process a selection event (click) at x,y
"""
raise NotImplementedError()
def drag(self, x, y):
"""
Process a drag to x, y
"""
raise NotImplementedError()
def release(self):
"""
Process a release
"""
raise NotImplementedError()
def disable(self):
self.enabled = False
if self.artist is not None:
self.artist.set_visible(False)
self.viewer.axes.figure.canvas.draw()
def enable(self):
self.enabled = True
if self.artist is not None:
self.artist.set_visible(True)
self.viewer.axes.figure.canvas.draw()
class ValueGrip(Grip):
value = CallbackProperty(None)
def __init__(self, viewer, artist=True):
super(ValueGrip, self).__init__(viewer, artist)
self._drag = False
def _artist_factory(self):
return ValueArtist(self)
def dblclick(self, x, y):
self.value = x
def pick_dist(self, x, y):
xy = [[x, y], [self.value, y]]
xypix = self.viewer.axes.transData.transform(xy)
return abs(xypix[1, 0] - xypix[0, 0])
def select(self, x, y):
if self.pick_dist(x, y) > PICK_THRESH:
return
self._drag = True
def drag(self, x, y):
if self._drag:
self.value = x
def release(self):
self._drag = False
class RangeGrip(Grip):
range = CallbackProperty((None, None))
def __init__(self, viewer):
super(RangeGrip, self).__init__(viewer)
# track state during drags
self._move = None
self._ref = None
self._refx = None
self._refnew = None
def _artist_factory(self):
return RangeArtist(self)
def pick_dist(self, x, y):
xy = np.array([[x, y],
[self.range[0], y],
[self.range[1], y],
[sum(self.range) / 2, y]])
xypix = self.viewer.axes.transData.transform(xy)
dx = np.abs(xypix[1:] - xypix[0])[:, 0]
return min(dx)
def select(self, x, y):
if self.pick_dist(x, y) > PICK_THRESH:
return self.new_select(x, y)
cen = sum(self.range) / 2.
wid = self.range[1] - self.range[0]
if x < cen - wid / 4.:
self._move = 'left'
elif x < cen + wid / 4.:
self._move = 'center'
self._ref = self.range
self._refx = x
else:
self._move = 'right'
def new_select(self, x, y):
"""
Begin a selection in "new range" mode.
In this mode, the previous grip position is ignored,
and the new range is defined by the select/release positions
"""
self._refnew = x
self.range = (x, x)
def new_drag(self, x, y):
"""
Drag the selection in "new mode"
"""
if self._refnew is not None:
self._set_range(self._refnew, x)
def drag(self, x, y):
if self._refnew is not None:
return self.new_drag(x, y)
if self._move == 'left':
if x > self.range[1]:
self._move = 'right'
self._set_range(x, self.range[1])
elif self._move == 'center':
dx = (x - self._refx)
self._set_range(self._ref[0] + dx, self._ref[1] + dx)
else:
if x < self.range[0]:
self._move = 'left'
self._set_range(self.range[0], x)
def _set_range(self, lo, hi):
self.range = min(lo, hi), max(lo, hi)
def release(self):
self._move = None
self._ref = None
self._refx = None
self._refnew = None
class ValueArtist(object):
def __init__(self, grip, **kwargs):
self.grip = grip
add_callback(grip, 'value', self._update)
ax = self.grip.viewer.axes
kwargs.setdefault('lw', 2)
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('c', '#ffb304')
trans = blended_transform_factory(ax.transData, ax.transAxes)
self._line, = ax.plot([grip.value, grip.value], [0, 1],
transform=trans, **kwargs)
def _update(self, value):
self._line.set_xdata([value, value])
self._line.axes.figure.canvas.draw()
def set_visible(self, visible):
self._line.set_visible(visible)
class RangeArtist(object):
def __init__(self, grip, **kwargs):
self.grip = grip
add_callback(grip, 'range', self._update)
ax = grip.viewer.axes
trans = blended_transform_factory(ax.transData, ax.transAxes)
kwargs.setdefault('lw', 2)
kwargs.setdefault('alpha', 0.5)
kwargs.setdefault('c', '#ffb304')
self._line, = ax.plot(self.x, self.y, transform=trans, **kwargs)
@property
def x(self):
l, r = self.grip.range
return [l, l, l, r, r, r]
@property
def y(self):
return [0, 1, .5, .5, 0, 1]
def _update(self, rng):
self._line.set_xdata(self.x)
self._line.axes.figure.canvas.draw()
def set_visible(self, visible):
self._line.set_visible(visible)
def _build_axes(figure):
ax2 = figure.add_subplot(122)
ax1 = figure.add_subplot(121, sharex=ax2)
ax1.xaxis.get_major_formatter().set_useOffset(False)
ax1.yaxis.get_major_formatter().set_useOffset(False)
ax2.xaxis.get_major_formatter().set_useOffset(False)
ax2.yaxis.get_major_formatter().set_useOffset(False)
return ax1, ax2
class ProfileViewer(object):
value_cls = ValueGrip
range_cls = RangeGrip
def __init__(self, figure):
self.axes, self.resid_axes = _build_axes(figure)
self._artist = None
self._resid_artist = None
self._x = self._xatt = self._y = self._yatt = None
self._resid = None
self.connect()
self._fit_artists = []
self.active_grip = None # which grip should receive events?
self.grips = []
self._xlabel = ''
def set_xlabel(self, xlabel):
self._xlabel = xlabel
def autoscale_ylim(self):
x, y = self._x, self._y
xlim = self.axes.get_xlim()
mask = (xlim[0] <= x) & (x <= xlim[1])
ymask = y[mask]
if ymask.size == 0:
return
ylim = np.nan_to_num(np.array([np.nanmin(ymask), np.nanmax(ymask)]))
self.axes.set_ylim(ylim[0], ylim[1] + .05 * (ylim[1] - ylim[0]))
if self._resid is None:
return
assert self._resid.size == y.size
ymask = self._resid[mask]
ylim = np.nan_to_num([np.nanmin(ymask), np.nanmax(ymask)])
diff = .05 * (ylim[1] - ylim[0])
self.resid_axes.set_ylim(ylim[0] - diff, ylim[1] + diff)
def _relayout(self):
if self._resid_artist is not None:
self.axes.set_position([0.1, .35, .88, .6])
self.resid_axes.set_position([0.1, .15, .88, .2])
self.resid_axes.set_xlabel(self._xlabel)
self.resid_axes.set_visible(True)
self.axes.set_xlabel('')
[t.set_visible(False) for t in self.axes.get_xticklabels()]
else:
self.resid_axes.set_visible(False)
self.axes.set_position([0.1, .15, .88, .83])
self.axes.set_xlabel(self._xlabel)
[t.set_visible(True) for t in self.axes.get_xticklabels()]
def set_profile(self, x, y, xatt=None, yatt=None, **kwargs):
"""
Set a new line profile
:param x: X-coordinate data
:type x: array-like
:param y: Y-coordinate data
:type y: array-like
:param xatt: ComponentID associated with X axis
:type xatt: :class:`~glue.core.data.ComponentID`
:param yatt: ComponentID associated with Y axis
:type yatt: :class:`~glue.core.data.ComponentID`
Extra kwargs are passed to matplotlib.plot, to
customize plotting
Returns the created MPL artist
"""
self.clear_fit()
self._x = np.asarray(x).ravel()
self._xatt = xatt
self._y = np.asarray(y).ravel()
self._yatt = yatt
if self._artist is not None:
self._artist.remove()
kwargs.setdefault('drawstyle', 'steps-mid')
self._artist = self.axes.plot(x, y, **kwargs)[0]
self._relayout()
self._redraw()
return self._artist
def clear_fit(self):
for a in self._fit_artists:
a.remove()
self._fit_artists = []
if self._resid_artist is not None:
self._resid_artist.remove()
self._resid_artist = None
def connect(self):
connect = self.axes.figure.canvas.mpl_connect
self._down_id = connect('button_press_event', self._on_down)
self._up_id = connect('button_release_event', self._on_up)
self._move_id = connect('motion_notify_event', self._on_move)
def disconnect(self):
off = self.axes.figure.canvas.mpl_disconnect
self._down_id = off(self._down_id)
self._up_id = off(self._up_id)
self._move_id = off(self._move_id)
def _on_down(self, event):
if not event.inaxes:
return
if event.dblclick:
if self.active_grip is not None:
self.active_grip.dblclick(event.xdata, event.ydata)
return
if self.active_grip is not None and self.active_grip.enabled:
self.active_grip.select(event.xdata, event.ydata)
def _on_up(self, event):
if not event.inaxes:
return
if self.active_grip is None or not self.active_grip.enabled:
return
self.active_grip.release()
def _on_move(self, event):
if not event.inaxes or event.button != 1:
return
if self.active_grip is None or not self.active_grip.enabled:
return
self.active_grip.drag(event.xdata, event.ydata)
def _redraw(self):
self.axes.figure.canvas.draw()
def profile_data(self, xlim=None):
if self._x is None or self._y is None:
raise ValueError("Must set profile first")
x = self._x
y = self._y
if xlim is not None:
mask = (min(xlim) <= x) & (x <= max(xlim))
x = x[mask]
y = y[mask]
return x, y
def fit(self, fitter, xlim=None):
try:
x, y = self.profile_data(xlim)
dy = None
except ValueError:
raise ValueError("Must set profile before fitting")
result = fitter.build_and_fit(x, y)
return result, x, y, dy
def plot_fit(self, fitter, fit_result):
self.clear_fit()
x = self._x
y = fitter.predict(fit_result, x)
self._fit_artists = fitter.plot(fit_result, self.axes, x)
resid = self._y - y
self._resid = resid
self._resid_artist, = self.resid_axes.plot(x, resid, 'k')
self.autoscale_ylim()
self._relayout()
def new_value_grip(self, callback=None):
"""
Create and return new ValueGrip
:param callback: A callback function to be invoked
whenever the grip.value property changes
"""
result = self.value_cls(self)
result.value = self._center[0]
if callback is not None:
add_callback(result, 'value', callback)
self.grips.append(result)
self.active_grip = result
return result
def new_range_grip(self, callback=None):
"""
Create and return new RangeGrip
:param callback: A callback function to be invoked
whenever the grip.range property changes
"""
result = self.range_cls(self)
center = self._center[0]
width = self._width
result.range = center - width / 4, center + width / 4
if callback is not None:
add_callback(result, 'range', callback)
self.grips.append(result)
self.active_grip = result
return result
@property
def _center(self):
"""Return the data coordinates of the axes center, as (x, y)"""
xy = self.axes.transAxes.transform([(.5, .5)])
xy = self.axes.transData.inverted().transform(xy)
return tuple(xy.ravel())
@property
def _width(self):
"""Return the X-width of axes in data units"""
xlim = self.axes.get_xlim()
return xlim[1] - xlim[0]
def pick_grip(self, x, y):
"""
Given a coordinate in Data units,
return the enabled Grip object nearest
that point, or None if none are nearby
"""
grips = [h for h in self.grips if h.enabled]
if not grips:
return
dist, grip = min((h.pick_dist(x, y), h)
for h in grips)
if dist < PICK_THRESH:
return grip
| bsd-3-clause |
horance-liu/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator_test.py | 21 | 53471 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import text_format
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_state_pb2
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
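# A model_fn may either return a bare (predictions, loss, train_op) tuple, as
# linear_model_fn does above, or wrap the same values in a ModelFnOps object
# as done here; the tests below exercise both forms.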
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
def _build_estimator_for_resource_export_test():
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column('feature', dimension=4)
]
def resource_constant_model_fn(unused_features, unused_labels, mode):
"""A model_fn that loads a constant from a resource and serves it."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
const = constant_op.constant(-1, dtype=dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableModel')
update_global_step = variables.get_global_step().assign_add(1)
if mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL):
key = constant_op.constant(['key'])
value = constant_op.constant([42], dtype=dtypes.int64)
train_op_1 = table.insert(key, value)
training_state = lookup.MutableHashTable(
dtypes.string, dtypes.int64, const, name='LookupTableTrainingState')
training_op_2 = training_state.insert(key, value)
return (const, const,
control_flow_ops.group(train_op_1, training_op_2,
update_global_step))
if mode == model_fn.ModeKeys.INFER:
key = constant_op.constant(['key'])
prediction = table.lookup(key)
return prediction, const, update_global_step
est = estimator.Estimator(model_fn=resource_constant_model_fn)
est.fit(input_fn=_input_fn, steps=1)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
return est, serving_input_fn
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
def _model_fn_ops(
expected_features, expected_labels, actual_features, actual_labels, mode):
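  # Build a minimal ModelFnOps whose train_op only runs once graph-level
  # assertions have checked that the features and labels actually received
  # match the values the test expected.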
assert_ops = tuple([
check_ops.assert_equal(
expected_features[k], actual_features[k], name='assert_%s' % k)
for k in expected_features
] + [
check_ops.assert_equal(
expected_labels, actual_labels, name='assert_labels')
])
with ops.control_dependencies(assert_ops):
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1))
def _make_input_fn(features, labels):
def _input_fn():
return {
k: constant_op.constant(v)
for k, v in six.iteritems(features)
}, constant_op.constant(labels)
return _input_fn
class EstimatorModelFnTest(test.TestCase):
def testModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, mode, params, config):
model_fn_call_count[0] += 1
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
est = estimator.Estimator(
model_fn=_model_fn, params=expected_params, config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testPartialModelFnArgs(self):
features = {'x': 42., 'y': 43.}
labels = 44.
expected_params = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
expected_foo = 45.
expected_bar = 46.
# TODO(ptucker): We have to roll our own mock since Estimator._get_arguments
# doesn't work with mock fns.
model_fn_call_count = [0]
# `features` and `labels` are passed by position, `arg0` and `arg1` here.
def _model_fn(arg0, arg1, foo, mode, params, config, bar):
model_fn_call_count[0] += 1
self.assertEqual(expected_foo, foo)
self.assertEqual(expected_bar, bar)
self.assertItemsEqual(features.keys(), arg0.keys())
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_params, params)
self.assertTrue(config.i_am_test)
return _model_fn_ops(features, labels, arg0, arg1, mode)
partial_model_fn = functools.partial(
_model_fn, foo=expected_foo, bar=expected_bar)
est = estimator.Estimator(
model_fn=partial_model_fn, params=expected_params,
config=expected_config)
self.assertEqual(0, model_fn_call_count[0])
est.fit(input_fn=_make_input_fn(features, labels), steps=1)
self.assertEqual(1, model_fn_call_count[0])
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return (constant_op.constant(0.), constant_op.constant(0.),
variables.get_global_step().assign_add(1))
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing train_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
update_global_step = variables.get_global_step().assign_add(1)
with ops.control_dependencies([update_global_step]):
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffoldInTraining(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testModelFnScaffoldSaverUsage(self):
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
variables_lib.Variable(1., 'weight')
real_saver = saver_lib.Saver()
self.mock_saver = test.mock.Mock(
wraps=real_saver, saver_def=real_saver.saver_def)
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant([[1.]]),
loss=constant_op.constant(0.),
train_op=variables.get_global_step().assign_add(1),
scaffold=monitored_session.Scaffold(saver=self.mock_saver))
def input_fn():
return {
'x': constant_op.constant([[1.]]),
}, constant_op.constant([[1.]])
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.save.called)
est.evaluate(input_fn=input_fn, steps=1)
self.assertTrue(self.mock_saver.restore.called)
est.predict(input_fn=input_fn)
self.assertTrue(self.mock_saver.restore.called)
def serving_input_fn():
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[None],
name='input_example_tensor')
features, labels = input_fn()
return input_fn_utils.InputFnOps(
features, labels, {'examples': serialized_tf_example})
est.export_savedmodel(os.path.join(est.model_dir, 'export'), serving_input_fn)
self.assertTrue(self.mock_saver.restore.called)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
# test nothing is saved, due to suppressing default saver
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaisesRegexp(
ValueError,
'model_dir are set both in constructor and RunConfig, '
'but with different'):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testModelDirIsCopiedToRunConfig(self):
config = run_config.RunConfig()
self.assertIsNone(config.model_dir)
est = estimator.Estimator(model_fn=linear_model_fn,
model_dir='test_dir',
config=config)
self.assertEqual('test_dir', est.config.model_dir)
self.assertEqual('test_dir', est.model_dir)
def testModelDirAsTempDir(self):
with test.mock.patch.object(tempfile, 'mkdtemp', return_value='temp_dir'):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertEqual('temp_dir', est.config.model_dir)
self.assertEqual('temp_dir', est.model_dir)
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def test_checkpoint_contains_relative_paths(self):
tmpdir = tempfile.mkdtemp()
est = estimator.Estimator(
model_dir=tmpdir,
model_fn=linear_model_fn_with_model_fn_ops)
est.fit(input_fn=boston_input_fn, steps=5)
checkpoint_file_content = file_io.read_file_to_string(
os.path.join(tmpdir, 'checkpoint'))
ckpt = checkpoint_state_pb2.CheckpointState()
text_format.Merge(checkpoint_file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, 'model.ckpt-5')
self.assertAllEqual(
['model.ckpt-1', 'model.ckpt-5'], ckpt.all_model_checkpoint_paths)
def test_train_save_copy_reload(self):
tmpdir = tempfile.mkdtemp()
model_dir1 = os.path.join(tmpdir, 'model_dir1')
est1 = estimator.Estimator(
model_dir=model_dir1,
model_fn=linear_model_fn_with_model_fn_ops)
est1.fit(input_fn=boston_input_fn, steps=5)
model_dir2 = os.path.join(tmpdir, 'model_dir2')
os.renames(model_dir1, model_dir2)
est2 = estimator.Estimator(
model_dir=model_dir2,
model_fn=linear_model_fn_with_model_fn_ops)
self.assertEqual(5, est2.get_variable_value('global_step'))
est2.fit(input_fn=boston_input_fn, steps=5)
self.assertEqual(10, est2.get_variable_value('global_step'))
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate; the estimator must copy this array internally if it
    # adds any hooks.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testSummaryWritingWithSummaryProto(self):
def _streaming_mean_squared_error_histogram(predictions,
labels,
weights=None,
metrics_collections=None,
updates_collections=None,
name=None):
metrics, update_ops = metric_ops.streaming_mean_squared_error(
predictions,
labels,
weights=weights,
metrics_collections=metrics_collections,
updates_collections=updates_collections,
name=name)
return summary.histogram('histogram', metrics), update_ops
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(
input_fn=boston_input_fn,
steps=200,
metrics={'MSE': _streaming_mean_squared_error_histogram})
events = util_test.latest_events(est.model_dir + '/eval')
output_values = {}
for e in events:
if e.HasField('summary'):
for v in e.summary.value:
output_values[v.tag] = v
self.assertTrue('MSE' in output_values)
self.assertTrue(output_values['MSE'].HasField('histo'))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
self.assertItemsEqual(
['bogus_lookup', 'feature'],
[compat.as_str_any(x) for x in graph.get_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS)])
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_resource(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_resource_export_test()
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(export_dir_base, serving_input_fn)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('LookupTableModel' in graph_ops)
self.assertFalse('LookupTableTrainingState' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
def test_export_savedmodel_with_graph_transforms(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra,
graph_rewrite_specs=[
estimator.GraphRewriteSpec(['tag_1'], []),
estimator.GraphRewriteSpec(['tag_2', 'tag_3'],
['strip_unused_nodes'])])
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
# tag_1 is untransformed.
tags = ['tag_1']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# Since there were no transforms, both save ops are still present.
self.assertTrue('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# Since there were no transforms, the hash table lookup is still there.
self.assertTrue('hash_table_Lookup' in graph_ops)
# Restore, to validate that the export was well-formed.
# tag_2, tag_3 was subjected to strip_unused_nodes.
tags = ['tag_2', 'tag_3']
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, tags, export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# The Saver used to restore the checkpoint into the export Session
# was not added to the SAVERS collection, so strip_unused_nodes removes
# it. The one explicitly created in export_savedmodel is tracked in
# the MetaGraphDef saver_def field, so that one is retained.
# TODO(soergel): Make Savers sane again. I understand this is all a bit
# nuts but for now the test demonstrates what actually happens.
self.assertFalse('save/SaveV2/tensor_names' in graph_ops)
self.assertTrue('save_1/SaveV2/tensor_names' in graph_ops)
# The fake hash table lookup wasn't connected to anything; stripped.
self.assertFalse('hash_table_Lookup' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool),
None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i
for i in xrange(8)]
for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
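  # These tests check that estimator._get_replica_device_setter pins
  # variables and mutable hash tables to the parameter-server job when one is
  # configured, while compute ops stay on the worker.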
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
| apache-2.0 |
dato-code/tutorials | strata-sj-2016/time-series/interactive_plot.py | 2 | 2657 |
import matplotlib.pyplot as _plt
from matplotlib.widgets import Button
_plt.style.use('ggplot')
## Plot an interactive version.
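## LineDrawer ties the three score panels together: a click in a panel sets a
## horizontal threshold there (resetting the other two), selects the rows of
## `scores` whose value in that panel meets the threshold, and re-plots them
## as red markers on the first axes, while the vertical guide lines follow
## the click's x position.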
class LineDrawer(object):
def __init__(self, scores, guide_lines, threshold_lines):
self.guide_lines = guide_lines
self.threshold_lines = threshold_lines
self.figure = self.guide_lines[0].figure
self.scores = scores
self.anoms = self.scores[:0]
self.anom_plot = self.figure.axes[0].plot(self.anoms['time'],
self.anoms['count'],
color='red', lw=0, marker='o',
markersize=10,
alpha=0.7)
def connect(self):
"""Connect to the event."""
self.cid_press = self.figure.canvas.mpl_connect('button_press_event',
self.on_press)
def disconnect(self):
"""Disconnect the event bindings."""
self.figure.canvas.mpl_disconnect(self.cid_press)
def on_press(self, event):
"""Store the location data when the mouse button is pressed."""
if event.inaxes == self.figure.axes[0]:
self.threshold_lines[0].set_ydata((event.ydata, event.ydata))
self.threshold_lines[1].set_ydata((0., 0.))
self.threshold_lines[2].set_ydata((0., 0.))
col = self.scores.value_col_names[0]
elif event.inaxes == self.figure.axes[1]:
self.threshold_lines[1].set_ydata((event.ydata, event.ydata))
self.threshold_lines[0].set_ydata((0., 0.))
self.threshold_lines[2].set_ydata((0., 0.))
col = self.scores.value_col_names[1]
elif event.inaxes == self.figure.axes[2]:
self.threshold_lines[2].set_ydata((event.ydata, event.ydata))
self.threshold_lines[0].set_ydata((0., 0.))
self.threshold_lines[1].set_ydata((0., 0.))
col = self.scores.value_col_names[2]
else:
return
        ## Select the anomalies at or above the chosen horizontal threshold.
mask = self.scores[col] >= event.ydata
self.anoms = self.scores[mask]
## Replot the anomalies on the first axes.
self.anom_plot[0].set_data((list(self.anoms['time']),
list(self.anoms['count'])))
## Re-position the vertical guide lines.
for line in self.guide_lines:
line.set_xdata((event.xdata, event.xdata))
## Re-draw the whole figure.
self.figure.canvas.draw()
| apache-2.0 |
moutai/scikit-learn | sklearn/cluster/tests/test_k_means.py | 10 | 29147 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics.cluster import homogeneity_score
# Non-centered, sparse centers to check that both dense and sparse inputs are
# handled correctly.
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(DataConversionWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_elkan_results():
rnd = np.random.RandomState(0)
X_normal = rnd.normal(size=(50, 10))
X_blobs, _ = make_blobs(random_state=0)
km_full = KMeans(algorithm='full', n_clusters=5, random_state=0, n_init=1)
km_elkan = KMeans(algorithm='elkan', n_clusters=5,
random_state=0, n_init=1)
for X in [X_normal, X_blobs]:
km_full.fit(X)
km_elkan.fit(X)
assert_array_almost_equal(km_elkan.cluster_centers_, km_full.cluster_centers_)
assert_array_equal(km_elkan.labels_, km_full.labels_)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
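    # Each sample is assigned to its closest (noisy) center and the inertia
    # is the sum of those squared distances:
    #   labels[i] = argmin_k ||x_i - c_k||^2,  inertia = sum_i min_k ||x_i - c_k||^2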
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
# Check that dense and sparse minibatch update give the same results
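    # A mini-batch step assigns the batch samples to their nearest centers and
    # then moves each selected center towards its assigned samples with a
    # per-center step size that shrinks as that center's count grows (roughly
    # c <- c + (x - c) / count), so dense and sparse inputs must stay in sync.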
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
if sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
# check that a warning is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_k_means_n_init():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
# two regression tests on bad n_init argument
# previous bug: n_init <= 0 threw non-informative TypeError (#3858)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=0).fit, X)
assert_raises_regex(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_explicit_init_shape():
# test for sensible errors when giving explicit init
# with wrong number of features or clusters
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 3))
for Class in [KMeans, MiniBatchKMeans]:
# mismatch of number of features
km = Class(n_init=1, init=X[:, :2], n_clusters=len(X))
msg = "does not match the number of features of the data"
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:, :2], n_clusters=len(X))
assert_raises_regex(ValueError, msg, km.fit, X)
# mismatch of number of clusters
msg = "does not match the number of clusters"
km = Class(n_init=1, init=X[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
# for callable init
km = Class(n_init=1, init=lambda X_, k, random_state: X_[:2, :], n_clusters=3)
assert_raises_regex(ValueError, msg, km.fit, X)
def test_k_means_fortran_aligned_data():
    # Check that KMeans works well even if X is Fortran-aligned data.
X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
centers = np.array([[0, 0], [0, 1]])
labels = np.array([0, 1, 1])
km = KMeans(n_init=1, init=centers, precompute_distances=False,
random_state=42, n_clusters=2)
km.fit(X)
assert_array_equal(km.cluster_centers_, centers)
assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
msg = "does not match the number of clusters"
assert_raises_regex(ValueError, msg, MiniBatchKMeans(init=test_init,
random_state=42).fit,
X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
# Check if copy_x=False returns nearly equal X after de-centering.
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
# Check k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not result
    # in the centers being repositioned to the center of mass, which would
    # lead to collapsed centers and in turn make the clustering dependent
    # on numerical instabilities.
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42, n_init=1,
algorithm='elkan')
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42, n_init=1,
algorithm='elkan')
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_predict_equal_labels():
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='full')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
km = KMeans(random_state=13, n_jobs=1, n_init=1, max_iter=1,
algorithm='elkan')
km.fit(X)
assert_array_equal(km.predict(X), km.labels_)
def test_full_vs_elkan():
km1 = KMeans(algorithm='full', random_state=13)
km2 = KMeans(algorithm='elkan', random_state=13)
km1.fit(X)
km2.fit(X)
    assert_equal(homogeneity_score(km1.predict(X), km2.predict(X)), 1.0)
def test_n_init():
    # Check that increasing the number of inits increases the quality
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
"""Test that x_squared_norms can be None in _init_centroids"""
from sklearn.cluster.k_means_ import _init_centroids
X_norms = np.sum(X**2, axis=1)
precompute = _init_centroids(
X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
assert_array_equal(
precompute,
_init_centroids(X, 3, "k-means++", random_state=0))
def test_max_iter_error():
km = KMeans(max_iter=-1)
assert_raise_message(ValueError, 'Number of iterations should be', km.fit, X)
| bsd-3-clause |
vortex-ape/scikit-learn | benchmarks/bench_covertype.py | 4 | 7382 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <[email protected]>
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.utils import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, max_iter=1000, tol=1e-3),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3),
'SAG': LogisticRegression(solver='sag', max_iter=2, C=1000)
}
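# Example invocation (a sketch; flags come from the argparse definitions below,
# paths and timings will differ per machine):
#     python bench_covertype.py --classifiers liblinear SGD CART --n-jobs 2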
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
jskDr/jamespy_py3 | wireless/nb_polar_r12.py | 1 | 20207 | import numpy as np
import numba as nb
import matplotlib.pyplot as plt
def calc_ber(e_array):
return np.mean(np.abs(e_array))
# Imitate static variable for a python function using decorate and setattr
def static_vars(**kwargs):
'''
@static_vars(counter=0)
def foo():
foo.counter += 1
print("Counter is %d" % foo.counter)
'''
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@nb.jit
def encode(u1, u2):
return (u1 + u2) % 2, u2
@nb.jit
def f_neg(a, b):
return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos(a, b, u):
return (-1)**u*a + b
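# Note on the two updates above: f_neg is the exact log-domain combination for
# the upper (check-node-like) branch of the N=2 kernel; a common
# hardware-friendly approximation (not used here) is
#     f_neg(a, b) ~= sign(a) * sign(b) * min(|a|, |b|),
# while f_pos is the lower-branch update once the partial decision u is known.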
@nb.jit
def decode(y1, y2):
l1 = f_neg(y1, y2)
u1_hard = 0 if l1 > 0 else 1
l2 = f_pos(y1, y2, u1_hard)
u2_hard = 0 if l2 > 0 else 1
return u1_hard, u2_hard, l1, l2
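# Quick round-trip sketch for the N=2 kernel (noiseless channel() assumed):
#     x1, x2 = encode(1, 0)                    # -> (1, 0)
#     y1, y2 = channel(x1, x2)                 # -> (-1.0, 1.0)
#     u1_hat, u2_hat, l1, l2 = decode(y1, y2)  # recovers (1, 0)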
@nb.jit
def channel(x1, x2):
y1 = 1.0 - x1*2
y2 = 1.0 - x2*2
return y1, y2
@nb.jit
def coding(u1, u2):
x1, x2 = encode(u1, u2)
y1, y2 = channel(x1, x2)
u1_, u2_, _, _ = decode(y1, y2)
e1, e2 = u1 - u1_, u2 - u2_
return e1, e2
@nb.jit
def coding_array(u_array, e_array):
for i in range(len(u_array)):
e_array[i,0], e_array[i,1] = coding(u_array[i,0], u_array[i,1])
def run_coding():
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = np.zeros_like(u_array)
coding_array(u_array, e_array)
print(e_array)
@nb.jit
def channel_awgn(x1, x2, SNRdB):
SNR = np.power(10, SNRdB/10)
Nsig = 1/np.sqrt(SNR)
n1 = np.random.normal(0) * Nsig
n2 = np.random.normal(0) * Nsig
y1 = 1.0 - x1*2 + n1
y2 = 1.0 - x2*2 + n2
return y1, y2
@nb.jit
def coding_awgn(u1, u2, SNRdB):
x1, x2 = encode(u1, u2)
y1, y2 = channel_awgn(x1, x2, SNRdB)
u1_, u2_, _, _ = decode(y1, y2)
e1, e2 = u1 - u1_, u2 - u2_
return e1, e2
@nb.jit
def coding_array_awgn(u_array, e_array, SNRdB):
for i in range(len(u_array)):
e_array[i,0], e_array[i,1] = coding_awgn(u_array[i,0], u_array[i,1], SNRdB)
def run_coding_awgn(SNRdB=10):
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = np.zeros_like(u_array)
coding_array_awgn(u_array, e_array, SNRdB)
# print(e_array)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
return BER
def main_run_coding_awgn(SNRdB_list=list(range(10))):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_awgn(SNRdB)
BER_list.append(BER)
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run_coding_awgn_tile(SNRdB=10, Ntile=1):
u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
u_array = np.tile(u_array_unit, (Ntile, 1))
e_array = np.zeros_like(u_array)
coding_array_awgn(u_array, e_array, SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
return BER
def main_run_coding_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_awgn_tile(SNRdB, Ntile)
BER_list.append(BER)
if flag_fig:
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
@nb.jit
def encode_array(u_array):
x_array = np.zeros_like(u_array)
for i in range(len(u_array)):
x_array[i,0], x_array[i,1] = encode(u_array[i,0], u_array[i,1])
return x_array
@nb.jit
def channel_array(x_array):
y_array = np.zeros(x_array.shape, dtype=nb.float_)
for i in range(len(x_array)):
y_array[i,0], y_array[i,1] = channel(x_array[i,0], x_array[i,1])
return y_array
@nb.jit
def decode_array(y_array):
ud_array = np.zeros(y_array.shape, dtype=nb.int_)
for i in range(len(y_array)):
ud_array[i,0], ud_array[i,1], _, _ = decode(y_array[i,0], y_array[i,1])
return ud_array
@nb.jit
def coding_array_all(u_array):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_array(x_array)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
def run_coding_array_all():
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = coding_array_all(u_array)
print(e_array)
@nb.jit
def channel_array_awgn(x_array, SNRdB):
y_array = np.zeros(x_array.shape, dtype=nb.float_)
for i in range(len(x_array)):
y_array[i,0], y_array[i,1] = channel_awgn(x_array[i,0], x_array[i,1], SNRdB)
return y_array
@nb.jit
def _coding_array_all_awgn(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_array_awgn(x_array, SNRdB)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
def run_coding_array_all_awgn(SNRdB=10):
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
print(e_array)
def run_coding_array_all_awgn_tile(SNRdB=10, Ntile=1):
u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
u_array = np.tile(u_array_unit, (Ntile, 1))
e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
# print(BER)
return BER
def main_run_coding_array_all_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_array_all_awgn_tile(SNRdB, Ntile)
BER_list.append(BER)
if flag_fig:
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
@nb.jit
def coding_array_all_awgn(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
@nb.jit
def channel_numpy_awgn(x_array, SNRdB):
"""
    Map the (0,1) bit values to (1,-1) BPSK symbols and add Gaussian noise.
"""
#y_array = np.zeros(x_array.shape, dtype=nb.float_)
SNR = np.power(10, SNRdB/10)
noise_sig = 1/np.sqrt(SNR)
n_array = np.random.normal(0.0, noise_sig, size=x_array.shape)
y_array = 1.0 - x_array*2 + n_array
return y_array
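# Note: the noise standard deviation follows from the linear SNR as
# noise_sig = 10 ** (-SNRdB / 20), i.e. unit signal power on BPSK symbols.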
# Usage list
# main_run_coding_awgn()
# run_coding_awgn()
# run_coding()
# N >= 2 Polar coding (Generalized)
@nb.jit
def encode_n(u):
"""
x = uBF(xn) where n = log(N), N=len(u), B is bit-reverse
"""
x = np.copy(u)
L = len(u)
if L != 1:
u1 = u[0::2]
u2 = u[1::2]
u1u2 = np.mod(u1 + u2, 2)
        x[:L//2] = encode_n(u1u2)
        x[L//2:] = encode_n(u2)
return x
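# Worked example (a sketch): encode_n(np.array([0, 1, 0, 0])) gives
# [1, 0, 1, 0] under this recursion -- the lone information bit is spread
# across the code block by the butterfly structure.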
@nb.jit
def encode_array_n(u_array):
x_array = np.zeros_like(u_array)
for i in range(len(u_array)):
x_array[i] = encode_n(u_array[i])
return x_array
@nb.jit
def f_neg_n(a, b):
return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos_n(a, b, u):
return (-1)**u*a + b
@nb.jit
def decode_n_r0(y_array):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_hard[:L//2] = decode_n(l1)
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_hard[:L//2])
        u_hard[L//2:], x_hard[L//2:] = decode_n(l2)
        x_hard[:L//2] = np.mod(x_hard[:L//2] + x_hard[L//2:], 2)
return u_hard, x_hard
@nb.jit
def decode_n(y_array):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_n(l1)
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_n(l2)
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
return u_hard, x_hard
@nb.jit
def decode_array_n(y_array):
ud_array = np.zeros(y_array.shape, dtype=nb.int_) #nb.int_)
for i in range(len(y_array)):
ud_array[i], _ = decode_n(y_array[i])
return ud_array
@nb.jit
def coding_array_all_awgn_n(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array_n(u_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
ud_array = decode_array_n(y_array)
e_array = u_array - ud_array
return e_array
class PolarCode:
def __init__(self, N_code=2, K_code=2):
"""
N_code: Code block size
K_code: Information bit size
"""
self.N_code = N_code
self.K_code = K_code
def plot(self, SNRdB_list, BER_list):
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run(self,
SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
u_array = np.random.randint(2, size=(N_iter, self.N_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_n(u_array, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
if flag_fig:
self.plot(SNRdB_list, BER_list)
self.BER_list = BER_list
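# Example usage (a sketch; parameters are illustrative):
#     pc = PolarCode(N_code=4, K_code=4)
#     pc.run(SNRdB_list=[0, 2, 4, 6], N_iter=1000, flag_fig=False)
#     print(pc.BER_list)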
# ====================================================================
# Polar coding system with frozen-bit handling
# ====================================================================
@nb.jit
def _decode_frozen_n(y_array, frozen_flag_n):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
if frozen_flag_n[0]:
u_hard[0] = 0
else:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_frozen_n(l1, frozen_flag_n[:L//2])
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_frozen_n(l2, frozen_flag_n[L//2:])
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
return u_hard, x_hard
@nb.jit
def decode_frozen_n(y_array, frozen_flag_n):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
if frozen_flag_n[0]:
x_hard[0] = 0
else:
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_frozen_n(l1, frozen_flag_n[:L//2])
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_frozen_n(l2, frozen_flag_n[L//2:])
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
return u_hard, x_hard
@nb.jit
def decode_frozen_array_n(y_array, frozen_flag_n):
ud_array = np.zeros(y_array.shape, dtype=nb.int_)
for i in range(len(y_array)):
ud_array[i], _ = decode_frozen_n(y_array[i], frozen_flag_n)
return ud_array
@nb.jit
def frozen_encode_n(uf, u, f):
"""
Input:
        uf: coded block of length N_code
        u: information block of length K_code
        f: length-N_code vector indicating whether each bit is frozen
"""
k = 0
for n in range(len(uf)):
if f[n]:
uf[n] = 0
else:
uf[n] = u[k]
k += 1
@nb.jit
def frozen_encode_array_n(u_array, frozen_flag_n):
N_iter = len(u_array)
N_code = len(frozen_flag_n)
uf_array = np.zeros(shape=(N_iter,N_code), dtype=nb.int_)
for i in range(N_iter):
frozen_encode_n(uf_array[i], u_array[i], frozen_flag_n)
return uf_array
@nb.jit
def frozen_decode_n(ud, ufd, f):
"""
Input:
        ufd: decoded block of length N_code
        ud: detected information block of length K_code
        f: length-N_code vector indicating whether each bit is frozen
"""
k = 0
for n in range(len(f)):
if f[n] == 0:
ud[k] = ufd[n]
k += 1
@nb.jit
def frozen_decode_array_n(ufd_array, frozen_flag_n):
N_iter = len(ufd_array)
N_code = len(frozen_flag_n)
K_code = N_code - np.sum(frozen_flag_n)
ud_array = np.zeros(shape=(N_iter,K_code), dtype=nb.int_)
for i in range(N_iter):
frozen_decode_n(ud_array[i], ufd_array[i], frozen_flag_n)
return ud_array
@nb.jit
def coding_array_all_awgn_frozen_n(u_array, frozen_flag_n, SNRdB=10):
e_array = np.zeros_like(u_array)
    # Frozen-bit handling is applied before encoding and after decoding.
    # u_array holds length-K_code vectors; uf_array holds length-N_code vectors.
uf_array = frozen_encode_array_n(u_array, frozen_flag_n)
    # encode_array_n() does not need to know which bits are frozen.
x_array = encode_array_n(uf_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
    ufd_array = decode_frozen_array_n(y_array, frozen_flag_n)  # must use the frozen-aware decoder here
# ufd_array = decode_array_n(y_array)
ud_array = frozen_decode_array_n(ufd_array, frozen_flag_n)
e_array = u_array - ud_array
return e_array
class PolarCodeFrozen:
def __init__(self, N_code=2, K_code=2, frozen_flag='manual', frozen_flag_n=np.zeros(2,dtype=int)):
"""
N_code=4: Code block size
K_code=2: Information bit size
        frozen_flag_n=[1,1,0,0]: marks whether each bit of the code block is frozen (1 if frozen, 0 otherwise).
        The number of non-frozen bits must equal K_code.
"""
if frozen_flag == 'auto':
frozen_flag_n = polar_design_bec(N_code=N_code, K_code=K_code)
print('Auto: frozen_flag_n =', frozen_flag_n)
assert N_code == len(frozen_flag_n)
assert N_code - K_code == np.sum(frozen_flag_n)
self.N_code = N_code
self.K_code = K_code
self.frozen_flag_n = frozen_flag_n
def plot(self, SNRdB_list, BER_list):
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run(self,
SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        # The number of information bits must be K_code; the remaining positions are filled with 0 according to frozen_flag_n.
u_array = np.random.randint(2, size=(N_iter, self.K_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_frozen_n(u_array, frozen_flag_n=self.frozen_flag_n, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
if flag_fig:
self.plot(SNRdB_list, BER_list)
print("SNRdB_list, BER_list")
print(SNRdB_list, BER_list)
self.BER_list = BER_list
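# Example usage (a sketch): a rate-1/2 code with automatically designed
# frozen bits.
#     pcf = PolarCodeFrozen(N_code=4, K_code=2, frozen_flag='auto')
#     pcf.run(SNRdB_list=[0, 2, 4], N_iter=1000)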
def _polar_bsc(N_code=4, p=0.11, N_iter=1000):
"""
    Designed for the (1,-1) symbol mapping rather than the original (0,1) code, so 2*p is used in place of p.
    Input:
        p=0.11: error probability. A fraction p of the symbols is assumed to be flipped,
        and the error of probability p is applied to every symbol regardless of whether it is 0 or 1.
    Comments:
        ud_hat holds the hard decisions made for the frozen bits; it is normally not used for BER computation.
        With every bit frozen there is no error back-propagation, so it is used to evaluate each bit channel.
"""
    # Freeze every bit
f = np.ones(N_code, dtype=int)
biterrd = np.zeros(N_code)
for _ in range(N_iter):
        # The transmitted input is assumed to be all zeros.
y = np.ones(N_code) - 2*p
y[np.random.rand(N_code)<p] = -1 + 2*p
ud_hat, _ = decode_frozen_n(y, f)
biterrd += ud_hat
biterrd /= N_iter
return biterrd
def polar_bsc(N_code=4, p=0.11, N_iter=1000):
"""
    Designed for the (1,-1) symbol mapping rather than the original (0,1) code, so 2*p is used in place of p.
    Input:
        p=0.11: error probability. A fraction p of the symbols is assumed to be flipped,
        and the error of probability p is applied to every symbol regardless of whether it is 0 or 1.
    Comments:
        ud_hat holds the hard decisions made for the frozen bits; it is normally not used for BER computation.
        With every bit frozen there is no error back-propagation, so it is used to evaluate each bit channel.
"""
    # Freeze every bit
f = np.ones(N_code, dtype=int)
biterrd = np.zeros(N_code)
for _ in range(N_iter):
        # The transmitted input is assumed to be all zeros.
y_bin = np.zeros(N_code) + p
y_bin[np.random.rand(N_code)<p] = 1 - p
ud_hat, _ = decode_frozen_n(1-2*y_bin, f)
biterrd += ud_hat
biterrd /= N_iter
return biterrd
@nb.jit
def polar_bec(N_code=4, erase_prob=0.5):
"""
    Compute the predicted performance of the polar code over a BEC, assuming a per-bit erasure probability of erase_prob.
"""
n = int(np.log2(N_code))
E = np.zeros(N_code)
# E_out = np.zeros(N_code)
E[0] = erase_prob
for i in range(n):
LN = 2**i
# print(i, LN)
# print('E in:', E)
        # At stage i the slice must end at LN*2; otherwise the assignment would broadcast.
E[LN:LN*2] = E[:LN] * E[:LN]
E[:LN] = 1-(1-E[:LN])*(1-E[:LN])
# print('E out:', E)
return E
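# Worked example (a sketch): polar_bec(N_code=4, erase_prob=0.5) returns
# [0.9375, 0.4375, 0.5625, 0.0625] under this recursion -- the familiar BEC
# polarization pattern, with the last synthetic channel the most reliable.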
def polar_design_bec(N_code=4, K_code=2, erase_prob=0.5):
"""
    For the BEC case, compute each bit's error rate and build the frozen flag.
"""
biterrd = polar_bec(N_code=N_code, erase_prob=erase_prob)
idx = np.argsort(biterrd)
frozen_flag_n = np.ones(N_code, dtype=int)
frozen_flag_n[idx[:K_code]] = 0
print('BER for each bit', biterrd)
return frozen_flag_n
if __name__ == '__main__':
# main_run_coding_awgn()
# main_run_coding_array_all_awgn_tile(Ntile=100000, flag_fig=True)
f = polar_design_bec(2,1)
print(f) | mit |
mattgiguere/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
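# MyPassiveAggressive above is a plain-numpy reference implementation used to
# cross-check the estimators: "hinge"/"epsilon_insensitive" use the PA-I step
# min(C, loss / ||x||^2), and the squared losses use the PA-II step
# loss / (||x||^2 + 1 / (2 * C)) (cf. Crammer et al., 2006).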
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
gbook/nidb | src/qcmodules/MRStructuralMotion/MRStructuralMotion.py | 1 | 14647 | # the order of these calls is important...
import sys
import os
import re
import getopt
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
import scipy.ndimage
import scipy.misc
from scipy import stats
import numpy
import math
import string
import random
from subprocess import call
import shutil
import glob
import MySQLdb
import time
# -------- main --------
def main():
# get start time
t0 = time.clock()
LoadConfig()
db = MySQLdb.connect(host=cfg['mysqlhost'], user=cfg['mysqluser'], passwd=cfg['mysqlpassword'], db=cfg['mysqldatabase'])
# indir is the original dicom directory for that series in the archive
moduleseriesid = sys.argv[1]
# get all the path information from the database
sqlstring = "select * from qc_moduleseries where qcmoduleseries_id = " + moduleseriesid
result = db.cursor(MySQLdb.cursors.DictCursor)
result.execute(sqlstring)
row = result.fetchone()
seriesid = row['series_id']
modality = row['modality']
# get the paths to the raw data, and copy it to a temp directory
sqlstring = "select a.series_num, a.is_derived, a.data_type, a.bold_reps, a.img_rows, b.study_num, d.uid from {0}_series a left join studies b on a.study_id = b.study_id left join enrollment c on b.enrollment_id = c.enrollment_id left join subjects d on c.subject_id = d.subject_id left join projects e on c.project_id = e.project_id where a.{1}series_id = '{2}'".format(modality,modality,seriesid)
print(sqlstring)
result = db.cursor(MySQLdb.cursors.DictCursor)
result.execute(sqlstring)
row = result.fetchone()
uid = row['uid']
study_num = row['study_num']
series_num = row['series_num']
datatype = row['data_type']
boldreps = row['bold_reps']
imgrows = row['img_rows']
if boldreps > 1:
print "Bold reps greater than 1, skipping"
exit(0)
if imgrows > 512:
print "Y dimension greater than 512 pixels, skipping"
exit(0)
# build the indir
indir = "{0}/{1}/{2}/{3}/{4}".format(cfg['archivedir'], uid, study_num, series_num, datatype)
print indir
#exit(0)
# create a tmp directory
outdir = '/tmp/Py_' + GenerateRandomString()
print ("Output directory: " + outdir)
if not os.path.exists(outdir):
os.makedirs(outdir)
# create a nifti file to check the sizes
#systemstring = "{0}/./dcm2nii -b '{0}/dcm2nii_4D.ini' -a y -e y -g y -p n -i n -d n -f n -o '{1}' {2}/*.dcm".format(cfg['scriptdir'],outdir,indir)
#print("\nRunning: [" + systemstring + "]\n")
#call(systemstring, shell=True)
# rename the file to 4D
#systemstring = "mv {0}/*.nii.gz {0}/4D.nii.gz".format(outdir)
#print("\nRunning: [" + systemstring + "]\n")
#call(systemstring, shell=True)
# get file dimensions
#systemstring = "fslval"
#dim4 =
# copy all dicom files to outdir (in case you screw up and delete the raw dicoms :(
systemstring = "cp " + indir + "/*.dcm " + outdir
print("Running: [" + systemstring + "]")
call(systemstring, shell=True)
# go into the temp directory
os.chdir(outdir)
# convert all the dicom files in the input directory INTO the temp directory as png files
systemstring = "mogrify -depth 16 -format png *.dcm"
print("Running: [" + systemstring + "]")
call(systemstring, shell=True)
# get list of png files
pngfiles = sorted(glob.glob('*.png'))
#print pngfiles
# check if there's only 1 file
if len(pngfiles) < 2:
print 0
exit(0)
i = 0
#totala = totalb = 0
#print '[%s]' % ', '.join(map(str, pngfiles))
allhist = []
for pngfile in pngfiles:
print os.path.exists(pngfile)
brain = matplotlib.image.imread(pngfile)
type(brain)
print brain.shape
print brain.dtype
#fft = numpy.log10(1+abs(numpy.fft.fftshift(numpy.fft.fft2(brain))))
fft = 1+abs(numpy.fft.fftshift(numpy.fft.fft2(brain)))
filename = "slice%d.png"%i
matplotlib.image.imsave(filename,fft)
print "Entering into azimuthalAverage({0}/{1}/{2})".format(uid,study_num,series_num)
histogram = azimuthalAverage(fft)
print "Should be done with azimuthalAverage({0}/{1}/{2})".format(uid,study_num,series_num)
# remove last element, because its always a NaN
#print 'Before [%s]' % ', '.join(map(str, histogram))
#print histogram.shape
histogram = numpy.delete(histogram, -1, 0)
#print 'After [%s]' % ', '.join(map(str, histogram))
# add this histo to the total histo
allhist.append(histogram)
#print allhist.size
#print float(i)
#print float(len(pngfiles)-1.0)
c = str(float(i)/float(len(pngfiles)-1.0))
#print "%.1f %% complete"%( (float(i)/(len(pngfiles)-1))*100)
lines = pyplot.plot(numpy.log10(histogram))
pyplot.setp(lines, color='#0000AA', alpha=0.25)
#totala += a
#totalb += b
i+=1
print "Hello"
allhist2 = numpy.vstack(allhist)
meanhistogram = allhist2.mean(axis=1)
print len(meanhistogram)
#del meanhistogram[-1]
print '[%s]' % ', '.join(map(str, allhist))
#a,b = linreg(range(len(meanhistogram)),meanhistogram)
#print "a,b [%d,%d]",a,b
# find mean slopes
#meana = totala/float(i)
#meanb = totalb/float(i)
dists = []
dists.extend(range(0,len(meanhistogram)))
#print dists
slope, intercept, r_value, p_value, std_err = stats.linregress(dists,meanhistogram)
pyplot.setp(lines, color='#0000AA', alpha=0.25)
print "R-value: "
print (slope)
#write out the final composite histogram
pyplot.xlabel('Frequency (lo -> hi)')
pyplot.ylabel('Power (log10)')
suptitle = 'Radial average of FFT (' + indir + ')'
pyplot.suptitle(suptitle)
title = "R^2: {0}".format(slope)
pyplot.title(title)
pyplot.grid(True)
#slope, intercept = numpy.polyfit(meanhistogram, dists, 1)
#idealhistogram = intercept + (slope * meanhistogram)
#r_sq = numpy.r_squared(dists, idealhistogram)
#r_sq = slope*slope
#fit_label = 'Linear fit ({0:.2f})'.format(slope)
#pyplot.plot(dists, idealhistogram, color='red', linestyle='--', label=fit_label)
#pyplot.annotate('r^2 = {0:.2f}'.format(r_sq), (0.05, 0.9), xycoords='axes fraction')
#pyplot.legend(loc='lower right')
# save the figure
pyplot.savefig('StructuralMotionHistogram.png')
# record the slope/intercept
if not os.path.exists(indir + "/qa"):
os.makedirs(indir + "/qa")
qafile = indir + "/qa/StructuralMotionR2.txt"
file = open(qafile, "w")
theline = "%f"%(slope)
file.write(theline)
file.close()
# get stop time
t = time.clock() - t0
# insert the result name into the database
sqlstring = "select qcresultname_id from qc_resultnames where qcresult_name = 'MotionR2'"
resultA = db.cursor(MySQLdb.cursors.DictCursor)
resultA.execute(sqlstring)
rowA = resultA.fetchone()
if resultA.rowcount > 0:
resultnameid = rowA['qcresultname_id']
else:
# insert a row
sqlstring = "insert into qc_resultnames (qcresult_name, qcresult_type) values ('MotionR2','number')"
print(sqlstring)
resultB = db.cursor(MySQLdb.cursors.DictCursor)
resultB.execute(sqlstring)
resultnameid = resultB.lastrowid
# InnoDB table... needs commit!
sqlstring = "insert into qc_results (qcmoduleseries_id, qcresultname_id, qcresults_valuenumber, qcresults_datetime, qcresults_cputime) values ({0}, {1}, {2}, now(), {3})".format(moduleseriesid,resultnameid,slope,t)
print(sqlstring)
cursor = db.cursor()
try:
cursor.execute(sqlstring)
db.commit()
except:
print("SQL statement [" + sqlstring + "] failed")
db.rollback()
exit(0)
# insert the image name into the resultnames table
sqlstring = "select qcresultname_id from qc_resultnames where qcresult_name = 'MotionR2 Plot'"
resultA = db.cursor(MySQLdb.cursors.DictCursor)
resultA.execute(sqlstring)
rowA = resultA.fetchone()
if resultA.rowcount > 0:
resultnameid = rowA['qcresultname_id']
else:
# insert a row
sqlstring = "insert into qc_resultnames (qcresult_name, qcresult_type) values ('MotionR2 Plot', 'image')"
print(sqlstring)
resultB = db.cursor(MySQLdb.cursors.DictCursor)
resultB.execute(sqlstring)
resultnameid = resultB.lastrowid
# insert an entry for the image into the database
sqlstring = "insert into qc_results (qcmoduleseries_id, qcresultname_id, qcresults_valuefile, qcresults_datetime) values ({0}, {1}, 'StructuralMotionHistogram.png', now())".format(moduleseriesid,resultnameid)
print(sqlstring)
cursor = db.cursor()
try:
cursor.execute(sqlstring)
db.commit()
except:
print("SQL statement [" + sqlstring + "] failed")
db.rollback()
exit(0)
# insert the R2 value into the mr_qa table
sqlstring = "update mr_qa set motion_rsq = '{0}' where mrseries_id = {1}".format(r_value**2,seriesid)
print(sqlstring)
cursor = db.cursor()
try:
cursor.execute(sqlstring)
db.commit()
except:
print("SQL statement [" + sqlstring + "] failed")
db.rollback()
exit(0)
#copy the histogram back to the qa directory
systemstring = "cp " + outdir + "/StructuralMotionHistogram.png " + "{0}/{1}/{2}/{3}/qa".format(cfg['archivedir'], uid, study_num, series_num)
#print("Running: [" + systemstring + "]")
call(systemstring, shell=True)
# remove the temp directory and all its contents
#shutil.rmtree(outdir)
systemstring = "rm -r " + outdir
print("Running: [" + systemstring + "]")
call(systemstring, shell=True)
exit(0)
# -----------------------------------------------------------------------------
# ---------- azimuthalAverage -------------------------------------------------
# -----------------------------------------------------------------------------
# Compute the radial average of an image. Assumes the FFT image is centered
# -----------------------------------------------------------------------------
def azimuthalAverage(image, center=None, stddev=False, returnradii=False, return_nr=False,
binsize=1, weights=None, steps=False, interpnan=False, left=None, right=None):
"""
Calculate the azimuthally averaged radial profile.
image - The 2D image
center - The [x,y] pixel coordinates used as the center. The default is
None, which then uses the center of the image (including
fractional pixels).
stddev - if specified, return the azimuthal standard deviation instead of the average
returnradii - if specified, return (radii_array,radial_profile)
return_nr - if specified, return number of pixels per radius *and* radius
binsize - size of the averaging bin. Can lead to strange results if
non-binsize factors are used to specify the center and the binsize is
too large
weights - can do a weighted average instead of a simple average if this keyword parameter
is set. weights.shape must = image.shape. weighted stddev is undefined, so don't
set weights and stddev.
steps - if specified, will return a double-length bin array and radial
profile so you can plot a step-form radial profile (which more accurately
represents what's going on)
interpnan - Interpolate over NAN values, i.e. bins where there is no data?
left,right - passed to interpnan; they set the extrapolated values
If a bin contains NO DATA, it will have a NAN value because of the
divide-by-sum-of-weights component. I think this is a useful way to denote
    lack of data, but users let me know if an alternative is preferred...
"""
# Calculate the indices from the image
y, x = numpy.indices(image.shape)
if center is None:
center = numpy.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
r = numpy.hypot(x - center[0], y - center[1])
if weights is None:
weights = numpy.ones(image.shape)
elif stddev:
raise ValueError("Weighted standard deviation is not defined.")
# the 'bins' as initially defined are lower/upper bounds for each bin
# so that values will be in [lower,upper)
nbins = int(numpy.round(r.max() / binsize)+1)
maxbin = nbins * binsize
bins = numpy.linspace(0,maxbin,nbins+1)
# but we're probably more interested in the bin centers than their left or right sides...
bin_centers = (bins[1:]+bins[:-1])/2.0
# Find out which radial bin each point in the map belongs to
whichbin = numpy.digitize(r.flat,bins)
# how many per bin (i.e., histogram)?
# there are never any in bin 0, because the lowest index returned by digitize is 1
nr = numpy.bincount(whichbin)[1:]
# recall that bins are from 1 to nbins (which is expressed in array terms by arange(nbins)+1 or xrange(1,nbins+1) )
# radial_prof.shape = bin_centers.shape
if stddev:
radial_prof = numpy.array([image.flat[whichbin==b].std() for b in xrange(1,nbins+1)])
else:
radial_prof = numpy.array([(image*weights).flat[whichbin==b].sum() / weights.flat[whichbin==b].sum() for b in xrange(1,nbins+1)])
#import pdb; pdb.set_trace()
if interpnan:
radial_prof = numpy.interp(bin_centers,bin_centers[radial_prof==radial_prof],radial_prof[radial_prof==radial_prof],left=left,right=right)
if steps:
xarr = numpy.array(zip(bins[:-1],bins[1:])).ravel()
yarr = numpy.array(zip(radial_prof,radial_prof)).ravel()
return xarr,yarr
elif returnradii:
return bin_centers,radial_prof
elif return_nr:
return nr,bin_centers,radial_prof
else:
return radial_prof
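# Illustrative usage sketch (commented out; not called anywhere in this script).
# The names `slice_data` and `power` and the binsize value are hypothetical; the
# function only assumes a 2D array whose FFT has been shifted so the zero-frequency
# term sits at the image center, e.g.:
#   power = numpy.abs(numpy.fft.fftshift(numpy.fft.fft2(slice_data)))**2
#   radii, profile = azimuthalAverage(power, returnradii=True, binsize=2)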
# -----------------------------------------------------------------------------
# ---------- GenerateRandomString ---------------------------------------------
# -----------------------------------------------------------------------------
# Generate a random string of the requested length from letters and digits
# -----------------------------------------------------------------------------
def GenerateRandomString(size=10, chars=string.ascii_letters + string.digits):
return ''.join(random.choice(chars) for x in range(size))
# -----------------------------------------------------------------------------
# ---------- LoadConfig -------------------------------------------------------
# -----------------------------------------------------------------------------
# Load the NiDB configuration file which includes database and path info
# -----------------------------------------------------------------------------
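# The parser below expects one "[variable] = value" pair per line (the square
# brackets are stripped) and skips blank lines and lines starting with '#'.
# Hypothetical example entries (names/values are placeholders, except archivedir,
# which is read elsewhere in this script):
#   [archivedir] = /nidb/archive
#   [mysqlhost] = localhost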
def LoadConfig():
global cfg
cfg = {}
f = open('/ado2/prod/programs/nidb.cfg','r')
#f = open('../../nidb.cfg','r')
#with open('../../nidb.cfg') as f:
try:
for line in f:
line = line.strip()
if (line != "") and (line[0] != "#"):
[variable, value] = line.split(' = ')
variable = re.sub('(\[|\])','',variable)
cfg[variable] = value
#print variable
finally:
f.close()
return
# Standard Python entry point: run main() only when this file is executed as a
# script (not when it is imported as a module).
if __name__ == "__main__":
sys.exit(main()) | gpl-3.0 |
qifeigit/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tseries/tests/test_base.py | 2 | 51601 | from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEqual(s.year,2000)
self.assertEqual(s.month,1)
self.assertEqual(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
Length: 0, Freq: D, Timezone: None"""
exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01]
Length: 1, Freq: D, Timezone: None"""
exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D, Timezone: None"""
exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D, Timezone: None"""
exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
Length: 3, Freq: H, Timezone: Asia/Tokyo"""
exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00-05:00, ..., NaT]
Length: 3, Freq: None, Timezone: US/Eastern"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00
Freq: H"""
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng - delta
expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ ]
def test_ops_properties(self):
self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
self.check_ops_properties(['microseconds','nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
Length: 0, Freq: D"""
exp2 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days']
Length: 1, Freq: D"""
exp3 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days', '2 days']
Length: 2, Freq: D"""
exp4 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days', ..., '3 days']
Length: 3, Freq: D"""
exp5 = """<class 'pandas.tseries.tdi.TimedeltaIndex'>
['1 days 00:00:01', ..., '3 days 00:00:00']
Length: 3, Freq: None"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, '1 days' to '1 days'
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, '1 days' to '2 days'
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, '1 days' to '3 days'
Freq: D"""
exp5 = """TimedeltaIndex: 3 entries, '1 days 00:00:01' to '3 days 00:00:00'"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
rng = timedelta_range('1 days','10 days',name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda : rng * offset)
# divide
expected = Int64Index((np.arange(10)+1)*12,name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
# divide with nats
rng = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
expected = Float64Index([12,np.nan,24])
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda : rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
dti = date_range('20130101',periods=3)
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda : tdi - dt)
self.assertRaises(TypeError, lambda : tdi - dti)
self.assertRaises(TypeError, lambda : td - dt)
self.assertRaises(TypeError, lambda : td - dti)
result = dt-dti
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = dti-dt
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = tdi-td
expected = TimedeltaIndex(['0 days',pd.NaT,'1 days'])
tm.assert_index_equal(result,expected)
result = td-tdi
expected = TimedeltaIndex(['0 days',pd.NaT,'-1 days'])
tm.assert_index_equal(result,expected)
result = dti-td
expected = DatetimeIndex(['20121231','20130101','20130102'])
tm.assert_index_equal(result,expected)
result = dt-tdi
expected = DatetimeIndex(['20121231',pd.NaT,'20121230'])
tm.assert_index_equal(result,expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101',periods=3)
ts = Timestamp('20130101')
dt = ts.to_datetime()
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_datetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result,expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda : dt_tz - ts)
self.assertRaises(TypeError, lambda : dt_tz - dt)
self.assertRaises(TypeError, lambda : dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda : dt - dt_tz)
self.assertRaises(TypeError, lambda : ts - dt_tz)
self.assertRaises(TypeError, lambda : ts_tz2 - ts)
self.assertRaises(TypeError, lambda : ts_tz2 - dt)
self.assertRaises(TypeError, lambda : ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda : dti - ts_tz)
self.assertRaises(TypeError, lambda : dti_tz - ts)
self.assertRaises(TypeError, lambda : dti_tz - ts_tz2)
result = dti_tz-dt_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = dt_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = dti_tz-ts_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = ts_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern')
tm.assert_index_equal(result,expected)
def test_dti_dti_deprecated_ops(self):
# deprecated in 0.16.0 (GH9094)
# change to return subtraction -> TimeDeltaIndex in 0.17.0
        # should move to the appropriate sections above
dti = date_range('20130101',periods=3)
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
with tm.assert_produces_warning(FutureWarning):
result = dti-dti
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti+dti
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti_tz
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz+dti_tz
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti-dti_tz
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti_tz+dti)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti+dti_tz)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
dti = date_range('20130101',periods=3)
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi-tdi
expected = TimedeltaIndex(['0 days',pd.NaT,'0 days'])
tm.assert_index_equal(result,expected)
result = tdi+tdi
expected = TimedeltaIndex(['2 days',pd.NaT,'4 days'])
tm.assert_index_equal(result,expected)
result = dti-tdi
expected = DatetimeIndex(['20121231',pd.NaT,'20130101'])
tm.assert_index_equal(result,expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days',pd.NaT,'2 days'],name='foo')
dti = date_range('20130101',periods=3)
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
tm.assert_index_equal(result,expected)
result = dt + tdi
expected = DatetimeIndex(['20130102',pd.NaT,'20130103'])
tm.assert_index_equal(result,expected)
result = td + tdi
expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
tm.assert_index_equal(result,expected)
result = tdi + td
expected = TimedeltaIndex(['2 days',pd.NaT,'3 days'])
tm.assert_index_equal(result,expected)
# unequal length
self.assertRaises(ValueError, lambda : tdi + dti[0:1])
self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
# this is a union!
#self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti
expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
tm.assert_index_equal(result,expected)
result = dti + tdi
expected = DatetimeIndex(['20130102',pd.NaT,'20130105'])
tm.assert_index_equal(result,expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result,expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result,expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
'1 days 08:00:00', '1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
self.assertTrue(result[i], expected[i])
self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertTrue(result_list[i], expected_list[i])
self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """<class 'pandas.tseries.period.PeriodIndex'>
Length: 0, Freq: D"""
exp2 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01]
Length: 1, Freq: D"""
exp3 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D"""
exp4 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D"""
exp5 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011, ..., 2013]
Length: 3, Freq: A-DEC"""
exp6 = """<class 'pandas.tseries.period.PeriodIndex'>
[2011-01-01 09:00, ..., NaT]
Length: 3, Freq: H"""
exp7 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1]
Length: 1, Freq: Q-DEC"""
exp8 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1, 2013Q2]
Length: 2, Freq: Q-DEC"""
exp9 = """<class 'pandas.tseries.period.PeriodIndex'>
[2013Q1, ..., 2013Q3]
Length: 3, Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH 6527
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3),
np.timedelta64(72, 'h'), Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120),
np.timedelta64(120, 'm'), Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
'2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-2.0 |
legacysurvey/legacypipe | py/legacyanalysis/test_djspsf.py | 2 | 2808 | from __future__ import print_function
import sys
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
from astrometry.util.file import trymakedirs
from tractor import *
from tractor.psfex import *
from legacypipe.survey import *
def main():
plt.figure(figsize=(4,8))
plt.subplots_adjust(hspace=0.2, wspace=0.2)
#expnum = 349185
#ccdname = 'N7'
#djs = PixelizedPsfEx(fn='psf-c4d_140818_011313_ooi_z_v1.fits',
# psfexmodel=SchlegelPsfModel)
expnum = 396086
ccdname = 'S31'
djs = PixelizedPsfEx(fn='decam-00396086-S31.fits',
psfexmodel=SchlegelPsfModel)
print('Schlegel PSF', djs)
stampsz = 15
print('Plotting bases')
djs.psfex.plot_bases(autoscale=False)
plt.suptitle('DJS PSF basis functions')
plt.savefig('djs-bases.png')
djs.psfex.plot_bases(stampsize=stampsz, autoscale=False)
plt.suptitle('DJS PSF basis functions')
plt.savefig('djs-bases2.png')
H,W = 4096, 2048
nx,ny = 6,11
yy = np.linspace(0., H, ny+1)
xx = np.linspace(0., W, nx+1)
# center of cells
yy = yy[:-1] + (yy[1]-yy[0])/2.
xx = xx[:-1] + (xx[1]-xx[0])/2.
mx = djs.psfex.psfbases.max()
kwa = dict(vmin=-0.1*mx, vmax=mx)
plt.subplots_adjust(hspace=0.0, wspace=0.0)
print('Plotting grid')
djs.psfex.plot_grid(xx, yy, stampsize=stampsz, **kwa)
plt.suptitle('DJS PSF grid')
plt.savefig('djs-grid.png')
for i in range(djs.psfex.nbases):
print('Plotting grid for parameter', i)
djs.psfex.plot_grid(xx, yy, term=i, stampsize=stampsz, **kwa)
plt.savefig('djs-term%i.png' % i)
survey = LegacySurveyData()
ccds = survey.find_ccds(expnum=expnum,ccdname=ccdname)
ccd = ccds[0]
im = survey.get_image_object(ccd)
band = ccd.filter
im.run_calibs()
tim = im.get_tractor_image(pixPsf=True, nanomaggies=False, subsky=False)
psfex = tim.psf.psfex
plt.subplots_adjust(hspace=0.2, wspace=0.2)
psfex.plot_bases(autoscale=False)
plt.suptitle('PsfEx basis functions')
plt.savefig('psfex-bases.png')
psfex.plot_bases(stampsize=stampsz, autoscale=False)
plt.suptitle('PsfEx basis functions')
plt.savefig('psfex-bases2.png')
mx = psfex.psfbases.max()
kwa = dict(vmin=-0.1*mx, vmax=mx)
plt.subplots_adjust(hspace=0.0, wspace=0.0)
print('Plotting grid')
psfex.plot_grid(xx, yy, stampsize=stampsz, **kwa)
plt.suptitle('PsfEx grid')
plt.savefig('psfex-grid.png')
for i in range(psfex.nbases):
print('Plotting grid for parameter', i)
psfex.plot_grid(xx, yy, term=i, stampsize=stampsz, **kwa)
plt.savefig('psfex-term%i.png' % i)
if __name__ == '__main__':
main()
| bsd-3-clause |
handroissuazo/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 12 | 11510 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train(): # returns x, y
  ...
def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
  ...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
h2educ/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <[email protected]>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
rsignell-usgs/notebook | system-test/Theme_1_Baseline/ioos_data_size.py | 1 | 7358 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# #Estimate how many TB of data are served by IOOS
# <markdowncell>
# Estimate dataset size from the OPeNDAP DDS. Here we use regular expressions to parse the DDS and multiply each variable's element size (32- or 64-bit Int or Float) by its shape. This represents the size in memory, not on disk, since the data could be compressed. But the data in memory is in some sense a truer representation of the quantity of data available via the service.
# <codecell>
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import pandas as pd
import datetime as dt
import requests
import re
import time
import random
from __future__ import print_function
# <codecell>
def service_urls(records,service_string='urn:x-esri:specification:ServiceType:odp:url'):
"""
Get all URLs matching a specific ServiceType
Unfortunately these seem to differ between different CSW-ISO services.
For example, OpenDAP is specified:
NODC geoportal: 'urn:x-esri:specification:ServiceType:OPeNDAP'
NGDC geoportal: 'urn:x-esri:specification:ServiceType:odp:url'
"""
urls=[]
for key,rec in records.iteritems():
#create a generator object, and iterate through it until the match is found
#if not found, gets the default value (here "none")
url = next((d['url'] for d in rec.references if d['scheme'] == service_string), None)
if url is not None:
urls.append(url)
return urls
# <markdowncell>
# ## Find OpenDAP endpoints from NGDC CSW
# <codecell>
endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw' # NGDC/IOOS Geoportal
dap_timeout=4 # timeout for DAP response
csw_timeout=60 # timeout for CSW response
csw = CatalogueServiceWeb(endpoint,timeout=csw_timeout)
csw.version
# <codecell>
[op.name for op in csw.operations]
# <codecell>
csw.get_operation_by_name('GetRecords').constraints
# <codecell>
for oper in csw.operations:
print(oper.name)
# <codecell>
csw.get_operation_by_name('GetRecords').constraints
# <markdowncell>
# Since the supported ISO queryables contain `apiso:ServiceType`, we can use CSW to find all datasets with services that contain the string "dap"
# <codecell>
try:
csw.get_operation_by_name('GetDomain')
csw.getdomain('apiso:ServiceType', 'property')
print(csw.results['values'])
except:
print('GetDomain not supported')
# <markdowncell>
# Since this CSW service doesn't provide us a list of potential values for `apiso:ServiceType`, we guess `opendap`, which seems to work:
# <codecell>
val = 'opendap'
service_type = fes.PropertyIsLike(propertyname='apiso:ServiceType',literal=('*%s*' % val),
escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [ service_type]
# <codecell>
csw.getrecords2(constraints=filter_list,maxrecords=10000,esn='full')
len(csw.records.keys())
# <markdowncell>
# By printing out the references from a random record, we see that for this CSW the DAP URL is identified by
# `urn:x-esri:specification:ServiceType:odp:url`
# <codecell>
choice=random.choice(list(csw.records.keys()))
print(choice)
csw.records[choice].references
# <markdowncell>
# Get all the OPeNDAP endpoints
# <codecell>
dap_urls = service_urls(csw.records,service_string='urn:x-esri:specification:ServiceType:odp:url')
len(dap_urls)
# <codecell>
def calc_dsize(txt):
'''
Calculate dataset size from the OPeNDAP DDS.
Approx method: Multiply 32|64 bit Int|Float variables by their shape.
'''
# split the OpenDAP DDS on ';' characters
all = re.split(';',txt)
'''
Use regex to find numbers following Float or Int (e.g. Float32, Int64)
and also numbers immediately preceding a "]". The idea is that in line like:
Float32 Total_precipitation_surface_6_Hour_Accumulation[time2 = 74][y = 303][x = 491];
we want to find only the numbers that are not part of a variable or dimension name
(want to return [32, 74, 303, 491], *not* [32, 6, 2, 74, 303, 491])
'''
m = re.compile('\d+(?=])|(?<=Float)\d+|(?<=Int)\d+')
dsize=0
for var in all:
c = map(int,m.findall(var))
if len(c)>=2:
vsize = reduce(lambda x,y: x*y,c)
dsize += vsize
return dsize/1.0e6/8. # return megabytes
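# <markdowncell>
# As a quick sanity check (illustrative only), applying `calc_dsize` to the example DDS line quoted in its own docstring should report roughly 44 MB (32 bits x 74 x 303 x 491, converted to megabytes).
# <codecell>
sample_dds = 'Float32 Total_precipitation_surface_6_Hour_Accumulation[time2 = 74][y = 303][x = 491];'
print('{:.1f} MB'.format(calc_dsize(sample_dds)))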
# <codecell>
def tot_dsize(url,dap_timeout=2):
das = url + '.dds'
tot = 0
try:
response = requests.get(das,verify=True, timeout=dap_timeout)
except:
return tot, -1
if response.status_code==200:
# calculate the total size for all variables:
tot = calc_dsize(response.text)
# calculate the size for MAPS variables and subtract from the total:
maps = re.compile('MAPS:(.*?)}',re.MULTILINE | re.DOTALL)
map_text = ''.join(maps.findall(response.text))
if map_text:
map_tot = calc_dsize(map_text)
tot -= map_tot
return tot,response.status_code
# <codecell>
time0 = time.time()
good_data=[]
bad_data=[]
count=0
for url in dap_urls:
count += 1
dtot, status_code = tot_dsize(url,dap_timeout=dap_timeout)
if status_code==200:
good_data.append([url,dtot])
print('[{}]Good:{},{}'.format(count,url,dtot), end='\r')
else:
bad_data.append([url,status_code])
print('[{}]Fail:{},{}'.format(count,url,status_code), end='\r')
print('Elapsed time={} minutes'.format((time.time()-time0)/60.))
# <codecell>
print('Elapsed time={} minutes'.format((time.time()-time0)/60.))
# <codecell>
len(bad_data)
# <codecell>
bad_data[0][0]
# <markdowncell>
# So how much data are we serving?
# <codecell>
sum=0
for ds in good_data:
sum +=ds[1]
print('{} terabytes'.format(sum/1.e6))
# <codecell>
url=[]
size=[]
for item in good_data:
url.append(item[0])
size.append(item[1])
# <codecell>
d={}
d['url']=url
d['size']=size
# <codecell>
good = pd.DataFrame(d)
# <codecell>
good.head()
# <codecell>
good=good.sort(['size'],ascending=0)
# <codecell>
good.head()
# <codecell>
url=[]
code=[]
for item in bad_data:
url.append(item[0])
code.append(item[1])
# <codecell>
d={}
d['url']=url
d['code']=code
bad = pd.DataFrame(d)
# <codecell>
bad.head()
# <codecell>
cd /usgs/data2/notebook/system-test/Theme_1_Baseline
# <codecell>
td = dt.datetime.today().strftime('%Y-%m-%d')
# <codecell>
bad.to_csv('bad'+td+'.csv')
# <codecell>
good.to_csv('good'+td+'.csv')
# <codecell>
bad=bad.sort(['url','code'],ascending=[0,0])
# <codecell>
bad = pd.read_csv('bad'+td+'.csv',index_col=0)
good = pd.read_csv('good'+td+'.csv',index_col=0)
# <codecell>
bad.head()
# <codecell>
recs = bad[bad['url'].str.contains('neracoos')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('ucar')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('tamu')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('axiom')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('caricoos')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('secoora')]
print(len(recs))
# <codecell>
recs = bad[bad['url'].str.contains('nanoos')]
print(len(recs))
# <codecell>
recs.to_csv('axiom.csv')
# <codecell>
!git add *.csv
# <codecell>
!git commit -m 'new csv'
# <codecell>
!git push
# <codecell>
| mit |
vorwerkc/pymatgen | pymatgen/analysis/graphs.py | 1 | 112413 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "Matthew Horton, Evan Spotte-Smith, Samuel Blau"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
        This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
        For periodic graphs, the class stores information on the graph
        edges about which lattice image each edge belongs to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()["graphs"]
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
@classmethod
def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
@staticmethod
def with_edges(structure, edges):
"""
        Constructor for StructureGraph, using pre-existing or pre-defined edges
with optional edge parameters.
        :param structure: Structure object
:param edges: dict representing the bonds of the functional
group (format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
sg.add_edge(
from_index,
to_index,
from_jimage=from_image,
to_jimage=to_image,
weight=weight,
edge_properties=props,
)
sg.set_node_attributes()
return sg
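    # Illustrative usage sketch (the Structure `structure` and the bond chosen
    # here are assumptions, not part of this module): declare a single bond from
    # site 0 to site 1 in the neighbouring cell along +x, with a stored weight:
    #   sg = StructureGraph.with_edges(
    #       structure, {(0, 1, (0, 0, 0), (1, 0, 0)): {"weight": 2.0}}
    #   )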
@staticmethod
def with_local_env_strategy(structure, strategy, weights=False):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param weights: if True, use weights from local_env class
(consult relevant class for their meaning)
:return:
"""
if not strategy.structures_allowed:
raise ValueError(
"Chosen strategy is not designed for use with structures! " "Please choose another strategy."
)
sg = StructureGraph.with_empty_graph(structure, name="bonds")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(
from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"] if weights else None,
warn_duplicates=False,
)
return sg
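    # Illustrative usage sketch (assumes `structure` is a pymatgen Structure):
    # bonding is typically detected with a near-neighbour strategy from
    # pymatgen.analysis.local_env, e.g.
    #   from pymatgen.analysis.local_env import CrystalNN
    #   sg = StructureGraph.with_local_env_strategy(structure, CrystalNN())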
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
from_jimage=(0, 0, 0),
to_jimage=None,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(
self.structure[from_index].distance_and_image(self.structure[from_index], jimage=image)[0]
)
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(
self.structure[from_index].coords, dist, dist * 0.01, include_index=True
)
for nnsite in equiv_sites:
to_jimage = np.subtract(nnsite.frac_coords, self.structure[from_index].frac_coords)
to_jimage = np.round(to_jimage).astype(int)
self.add_edge(
from_index=from_index,
from_jimage=(0, 0, 0),
to_jimage=to_jimage,
to_index=nnsite.index,
)
return
# sanitize types
from_jimage, to_jimage = (
tuple(map(int, from_jimage)),
tuple(map(int, to_jimage)),
)
from_index, to_index = int(from_index), int(to_index)
# if edge is from site i to site i, constrain direction of edge
# this is a convention to avoid duplicate hops
if to_index == from_index:
if to_jimage == (0, 0, 0):
warnings.warn("Tried to create a bond to itself, " "this doesn't make sense so was ignored.")
return
# ensure that the first non-zero jimage index is positive
# assumes that at least one non-zero index is present
is_positive = [idx for idx in to_jimage if idx != 0][0] > 0
if not is_positive:
# let's flip the jimage,
# e.g. (0, 1, 0) is equivalent to (0, -1, 0) in this case
to_jimage = tuple(-idx for idx in to_jimage)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index, to_index, to_jimage)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties)
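    # Illustrative usage sketch (assumes `sg` is a StructureGraph; indices,
    # image and weight are placeholder values): connect site 0 to the periodic
    # image of site 1 one cell over in +x, storing the bond length as weight:
    #   sg.add_edge(0, 1, to_jimage=(1, 0, 0), weight=2.35)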
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
        A wrapper around Structure.insert(), which also incorporates the new
        site into the StructureGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param coords_are_cartesian: Whether coordinates are cartesian.
Defaults to False.
        :param validate_proximity: For Structure.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
        :param site_properties: Site properties for Structure
:param edges: List of dicts representing edges to be added to the
            StructureGraph. These edges must include the index of the new site i,
            and all indices used for these edges should reflect the
            StructureGraph AFTER the insertion, NOT before. Each dict should at
            least have a "to_index", "from_index" and "to_jimage" key, and can
            also have a "weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
        A wrapper for Structure.remove_sites().
        :param indices: list of indices in the current Structure (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
        substituted will not place atoms too close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
# By default, assume that all edges should stay remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
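    # Illustrative usage sketch (the site index, group name and strategy are
    # assumptions): replace site 1 by a named functional-group template, letting
    # a local_env strategy class rebuild the bonds of the attached group:
    #   from pymatgen.analysis.local_env import MinimumDistanceNN
    #   sg.substitute_group(1, "methyl", MinimumDistanceNN)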
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
        Returns a list of ConnectedSite named tuples for the neighbors of site n:
        site, jimage, index, weight, dist.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
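    # Illustrative usage sketch (assumes `sg` is a StructureGraph): iterate over
    # the neighbours of site 0 in the origin cell, closest first; each entry is
    # a ConnectedSite namedtuple (site, jimage, index, weight, dist):
    #   for cs in sg.get_connected_sites(0):
    #       print(cs.index, cs.jimage, cs.dist)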
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
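    # Illustrative usage sketch (requires the GraphViz binaries on the path;
    # the filename is a placeholder): render the graph, keeping periodic-image
    # edges visible and labelling nodes by species and index:
    #   sg.draw_graph_to_file("bonds.pdf", hide_image_edges=False, node_labels=True)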
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d["weight"])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
:return: A dict with an 'all_weights' list and summary
statistics: 'min', 'max', 'mean' and 'variance'
"""
all_weights = [d.get("weight", None) for u, v, d in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy="omit")
return {
"all_weights": all_weights,
"min": stats.minmax[0],
"max": stats.minmax[1],
"mean": stats.mean,
"variance": stats.variance,
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
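# Hedged usage sketch (not part of the original code; assumes an existing
# StructureGraph `sg`, e.g. one built for MoS2 with a local_env strategy):
# >>> sg.types_of_coordination_environments()
# ['Mo-S(6)', 'S-Mo(3)']
# >>> sg.types_of_coordination_environments(anonymous=True)
# ['A-B(3)', 'A-B(6)']  # each centre species is relabelled to 'A'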
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
s = Structure.from_dict(d["structure"])
return cls(s, d["graphs"])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
# computationally expensive than just keeping track of
# which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
# new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for every one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
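# Hedged usage sketch (not part of the original code; assumes an existing
# StructureGraph `sg`): scaling by a diagonal (a, b, c) tuple builds a
# supercell graph whose node count grows by a*b*c, while full 3x3 matrices
# currently raise NotImplementedError (see above).
# >>> supercell_sg = sg * (3, 3, 3)
# >>> len(supercell_sg) == 27 * len(sg)
# True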
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
return ValueError("Meaningless to compare StructureGraphs if " "corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
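# Worked illustration of the Jaccard distance defined above (assumption:
# small hypothetical edge sets, not taken from the original code): if this
# graph has edges {e1, e2, e3} and the other has {e2, e3, e4}, the
# intersection holds 2 edges and the union 4, so dist = 1 - 2/4 = 0.5.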
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, "_supercell_sg", None) is None:
self._supercell_sg = supercell_sg = self * (3, 3, 3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
# these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
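# Hedged usage sketch (not part of the original code; assumes `sg` is a
# StructureGraph of a molecular crystal such as ice):
# >>> mols = sg.get_subgraphs_as_molecules()
# >>> [m.composition.reduced_formula for m in mols]
# ['H2O']  # isomorphic duplicates are filtered, so each molecule appears once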
class MolGraphSplitError(Exception):
"""
Raised when a molecule graph is failed to split into two disconnected
subgraphs
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
This class that contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
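# Hedged usage sketch (not part of the original code; assumes `mol` is any
# pymatgen Molecule): the resulting graph starts with one node per site and
# no edges.
# >>> mg = MoleculeGraph.with_empty_graph(mol, edge_weight_name="bond_length",
# ... edge_weight_units="Å")
# >>> mg.graph.number_of_edges()
# 0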
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index)" "tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
mg.set_node_attributes()
return mg
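# Hedged usage sketch (not part of the original code; assumes `water` is a
# 3-site Molecule ordered O, H, H): props may be None or a dict that can
# carry a "weight" plus arbitrary edge properties.
# >>> edges = {(0, 1): {"weight": 0.96}, (0, 2): {"weight": 0.96}}
# >>> mg = MoleculeGraph.with_edges(water, edges)
# >>> mg.get_coordination_of_site(0)
# 2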
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor["image"], [0, 0, 0]):
continue
if n > neighbor["site_index"]:
from_index = neighbor["site_index"]
to_index = n
else:
from_index = n
to_index = neighbor["site_index"]
mg.add_edge(
from_index=from_index,
to_index=to_index,
weight=neighbor["weight"],
warn_duplicates=False,
)
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
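# Hedged usage sketch (not part of the original code; assumes `mol` is a
# Molecule and that OpenBabelNN, one of the NearNeighbors strategies in
# pymatgen.analysis.local_env, is available):
# >>> from pymatgen.analysis.local_env import OpenBabelNN
# >>> mg = MoleculeGraph.with_local_env_strategy(mol, OpenBabelNN())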
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def get_disconnected_fragments(self):
"""
Determine if the MoleculeGraph is connected. If it is not, separate the
MoleculeGraph into different MoleculeGraphs, where each resulting
MoleculeGraph is a disconnected subgraph of the original.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:return: list of MoleculeGraphs
"""
if nx.is_weakly_connected(self.graph):
return [copy.deepcopy(self)]
original = copy.deepcopy(self)
sub_mols = list()
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i, n in enumerate(nodes):
mapping[n] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
for k, v in list(properties.items()):
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function does not only alter the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError(
"Cannot split molecule; \
MoleculeGraph is still connected."
)
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
return original.get_disconnected_fragments()
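# Hedged usage sketch (not part of the original code; assumes `mg` is an
# ethanol-like MoleculeGraph where (1, 2) is the C-C bond):
# >>> frags = mg.split_molecule_subgraphs([(1, 2)], allow_reverse=True)
# >>> len(frags)
# 2
# If the chosen bonds leave the graph connected, MolGraphSplitError is raised.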
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
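# Hedged usage sketch (not part of the original code; assumes `mg` is a small
# MoleculeGraph): keys combine an alphabetical formula with an edge count,
# so a methane-like fragment might appear under something like 'C1 H4 E4'.
# >>> frag_dict = mg.build_unique_fragments()
# >>> sorted(frag_dict)[:3]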
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If graph indices have different indexing
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError(
"Currently functional group replacement" "cannot occur at an atom within a ring" "structure."
)
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
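# Hedged usage sketch (not part of the original code; assumes `mg` is a
# benzene-like MoleculeGraph with the carbon atoms indexed 0-5):
# >>> rings = mg.find_rings(including=[0])
# >>> len(rings[0])  # one six-membered ring, returned as six edges
# 6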
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
if "to_image" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.node[u]["fillcolor"]
color_v = g.node[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d["molecule"])
return cls(m, d["graphs"])
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edge_to_remove in edges_to_remove:
self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
# Site should have a proper __hash__() value,
# using its coords as a convenient key
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
return ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.")
if strict:
# sort for consistent node indices
# Site should have a proper __hash__() value,
# using its coords as a convenient key
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
| mit |
adamrvfisher/TechnicalAnalysisLibrary | PriceRelativeRemoteSignalSmartPositionMAOptimizerTwoAsset.py | 1 | 6253 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
#Changes are pretty much immaterial, use VXV/VIX
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
Ticker2 = '^VIX'
#Remote Signal
Ticker3 = '^VIX'
#Here we go
Asset1 = YahooGrabber(Ticker1)
Asset2 = YahooGrabber(Ticker2)
#Remote Signal
Asset3 = YahooGrabber(Ticker3)
#Match lengths
#Trimmer
trim = abs(len(Asset1) - len(Asset2))
if len(Asset1) == len(Asset2):
pass
else:
if len(Asset1) > len(Asset2):
Asset1 = Asset1[trim:]
else:
Asset2 = Asset2[trim:]
Asset3 = Asset3[-len(Asset2):]
#Asset2 = Asset2[-600:]
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset2['LogRet'] = np.log(Asset2['Adj Close']/Asset2['Adj Close'].shift(1))
Asset2['LogRet'] = Asset2['LogRet'].fillna(0)
#Prepare the remote controller
Asset3['LogRet'] = np.log(Asset3['Adj Close']/Asset3['Adj Close'].shift(1))
Asset3['LogRet'] = Asset3['LogRet'].fillna(0)
#Brute Force Optimization
iterations = range(0, 300000)
for i in iterations:
Counter = Counter + 1
a = rand.random()
b = 1 - a
c = rand.random()
d = rand.random()
if c + d > 1:
continue
e = rand.randint(3,30)
f = rand.randint(3,30)
g = rand.randint(0,30)
window = int(e)
window2 = int(f)
window3 = int(g)
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Signal'] = np.where(Asset3['MA'].shift(1) > Asset3['Adj Close'].shift(1),
1, 0)
Asset3['CumulativeRollingSignal'] = Asset3['Signal'].rolling(window = window2).sum()
    Asset1['Position'] = a
    Asset1['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3, c, a)
    Asset1['Pass'] = (Asset1['LogRet'] * Asset1['SmartPosition'])
    Asset2['Position'] = b
    Asset2['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > window3, d, b)
    Asset2['Pass'] = (Asset2['LogRet'] * Asset2['SmartPosition'])
Portfolio['Asset1Pass'] = (Asset1['Pass']) * (-1) #Pass a short position
Portfolio['Asset2Pass'] = (Asset2['Pass']) #* (-1)
# Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = (Portfolio['Asset1Pass']) + (Portfolio['Asset2Pass'])
# Portfolio['LongShort'][-180:].cumsum().apply(np.exp).plot(grid=True,
# figsize=(8,5))
if Portfolio['LongShort'].std() == 0:
continue
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
if MaxDD > float(.3):
continue
dailyreturn = Portfolio['LongShort'].mean()
if dailyreturn < .002:
continue
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
MaxDD = max(drawdown)
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(d)
Empty.append(e)
Empty.append(f)
Empty.append(g)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
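# --- Illustrative helper (added; not called by the optimizer above) ------------------
# The remote-signal rule inside the loop reads: count how many of the last `window2`
# days closed below their `window`-day moving average, and switch from the base
# weights (a, b) to the alternate weights (c, d) once that count exceeds `window3`.
# A minimal standalone sketch of that rule:
def remote_signal_active(adj_close, window, window2, window3):
    ma = adj_close.rolling(window=window, center=False).mean()
    below_ma = np.where(ma.shift(1) > adj_close.shift(1), 1, 0)
    rolling_count = pd.Series(below_ma, index=adj_close.index).rolling(window=window2).sum()
    return rolling_count > window3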
z1 = Dataset.iloc[8]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[8]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[8]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int((Dataset[kfloat][4]))
window2 = int((Dataset[kfloat][5]))
Asset3['MA'] = Asset3['Adj Close'].rolling(window=window, center=False).mean()
Asset3['Signal'] = np.where(Asset3['MA'].shift(1) > Asset3['Adj Close'].shift(1),
                            1, 0)
Asset3['CumulativeRollingSignal'] = Asset3['Signal'].rolling(window = window2).sum()
Asset1['Position'] = Dataset[kfloat][0]
Asset1['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > Dataset[kfloat][6],
                                   Dataset[kfloat][2], Dataset[kfloat][0])
Asset1['Pass'] = (Asset1['LogRet'] * Asset1['SmartPosition'])
Asset2['Position'] = Dataset[kfloat][1]
Asset2['SmartPosition'] = np.where(Asset3['CumulativeRollingSignal'] > Dataset[kfloat][6],
                                   Dataset[kfloat][3], Dataset[kfloat][1])
Asset2['Pass'] = (Asset2['LogRet'] * Asset2['SmartPosition'])
Portfolio['Asset1Pass'] = Asset1['Pass'] * (-1)
Portfolio['Asset2Pass'] = Asset2['Pass'] #* (-1)
#Portfolio['PriceRelative'] = Asset1['Adj Close'] / Asset2['Adj Close']
#asone['PriceRelative'][-180:].plot(grid = True, figsize = (8,5))
Portfolio['LongShort'] = Portfolio['Asset1Pass'] + Portfolio['Asset2Pass']
Portfolio['LongShort'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Portfolio['LongShort'].mean()
dailyvol = Portfolio['LongShort'].std()
sharpe =(dailyreturn/dailyvol)
Portfolio['Multiplier'] = Portfolio['LongShort'].cumsum().apply(np.exp)
drawdown2 = 1 - Portfolio['Multiplier'].div(Portfolio['Multiplier'].cummax())
#conversionfactor = Portfolio['PriceRelative'][-1]
print(max(drawdown2))
#pd.to_pickle(Portfolio, 'VXX:UVXY') | apache-2.0 |
Priyansh2/test | ltrc/extractor/classification/test.py | 1 | 7909 | # -*- coding: utf-8 -*-
#! /usr/bin/env python2
from gensim import corpora, models
import gensim
from operator import itemgetter
import numpy as np
import sys
import os
import re
import codecs
import io
import math
from scipy import sparse
from sklearn.cross_validation import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.base import TransformerMixin
from sklearn import svm
from sklearn import metrics
from sklearn.pipeline import make_pipeline , Pipeline
reload(sys)
sys.setdefaultencoding('utf8')
np.set_printoptions(threshold='nan')
suffixes = {
1: ["ो", "े", "ू", "ु", "ी", "ि", "ा"],
2: ["कर", "ाओ", "िए", "ाई", "ाए", "ने", "नी", "ना", "ते", "ीं", "ती", "ता", "ाँ", "ां", "ों", "ें"],
3: ["ाकर", "ाइए", "ाईं", "ाया", "ेगी", "ेगा", "ोगी", "ोगे", "ाने", "ाना", "ाते", "ाती", "ाता", "तीं", "ाओं", "ाएं", "ुओं", "ुएं", "ुआं"],
4: ["ाएगी", "ाएगा", "ाओगी", "ाओगे", "एंगी", "ेंगी", "एंगे", "ेंगे", "ूंगी", "ूंगा", "ातीं", "नाओं", "नाएं", "ताओं", "ताएं", "ियाँ", "ियों", "ियां"],
5: ["ाएंगी", "ाएंगे", "ाऊंगी", "ाऊंगा", "ाइयाँ", "ाइयों", "ाइयां"],
}
categories=['A','C','D','E']
mappings={}
mappings['A']=1
mappings['C']=3
mappings['D']=4
mappings['E']=5
path='/home/priyansh/Downloads/ltrc/1055/'
train_data_path='/home/priyansh/Downloads/ltrc/extractor/clustering/four_class_devanagari/'
path1=train_data_path+"A/"
path2=train_data_path+"C/"
path3=train_data_path+"D/"
path4=train_data_path+"E/"
documents=[] #contains all doc filenames along with class labels
doc_info_with_label=[] #two tuple storage of doc info along with their respective labels
def hi_stem(word):
for L in 5, 4, 3, 2, 1:
if len(word) > L + 1:
for suf in suffixes[L]:
if word.endswith(suf):
return word[:-L]
return word
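# Illustrative note (added): hi_stem strips the longest matching Devanagari suffix it
# finds, e.g. a token such as "करने" (4 code points) ends with the 2-character suffix
# "ने" and would be reduced to the stem "कर"; tokens too short for any suffix are
# returned unchanged.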
def store_data(dir_path_list):
for dir_path in dir_path_list:
class_name = dir_path.split("/")[8]
for filename in os.listdir(dir_path):
if filename not in documents:
documents.append(filename+"+"+str(mappings[class_name]))
infilename=os.path.join(dir_path,filename)
with codecs.open(infilename,'r','utf-8') as fl:
string=''
for line in fl:
for word in line.split():
if word!=" " or word!="\n":
string+=word+" "
fl.close()
temp=[]
temp.append(class_name)
temp.append(string)
doc_info_with_label.append(tuple(temp))
path_list=[]
path_list.append(path1)
path_list.append(path2)
#path_list.append(path3)
#path_list.append(path4)
store_data(path_list)
y = [d[0] for d in doc_info_with_label] #length is no:ofsamples
corpus = [d[1] for d in doc_info_with_label]
class feature_extractor(TransformerMixin):
def __init__(self,*featurizers):
self.featurizers = featurizers
def fit(self,X,y=None):
return self
def transform(self,X):
collection_features=[]
for f in self.featurizers:
collection_features.append(f(X))
feature_vect=np.array(collection_features[0])
if len(collection_features)>1:
for i in range(1,len(collection_features)):
feature_vect=np.concatenate((feature_vect,np.array(collection_features[i])),axis=1)
#print feature_vect.shape
return feature_vect.tolist()
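# Illustrative usage (added; mirrors the call near the bottom of this file): the
# featurizers passed to feature_extractor are concatenated column-wise, so
#     my_featurizer = feature_extractor(tfidf, lda)
#     X = my_featurizer.transform(corpus)
# would append the LDA topic probabilities after the tf-idf columns of each document.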
def tfidf_score(word,document_no,corpus_data):
#print word
my_word=word
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
document=corpus_data[document_no]
#print document
wordcount=0
total=0
temp = document.split()
for i in temp:
#print i
if i not in stopwords:
total+=1
if i==my_word:
#print my_word
#print word
wordcount+=1
#print wordcount
#print total
tf = float(wordcount)/total
#print tf
#return tf(word,document)*idf(word,corpus_data)
total_docs = len(corpus_data)
count=0
for doc in corpus_data:
temp=[]
temp = doc.split()
for i in temp:
if i==word:
count+=1
break
total_docs_which_contains_the_words=count
    idf = math.log(float(total_docs)/(1+total_docs_which_contains_the_words))
return tf*idf
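# For reference (added comment), the score computed above is the classic formulation:
#     tfidf(w, d) = tf(w, d) * idf(w)
#     tf(w, d)    = count of w in d / number of non-stopword tokens in d
#     idf(w)      = log(N / (1 + number of documents containing w)),  N = total documents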
def tfidf(corpus_data):
word_id_mapping={}
cnt=0
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
unique_words_in_corpus={}
count=0
for data in corpus_data:
corpus_id=count
temp=[]
temp=data.split()
for word in temp:
if word not in unique_words_in_corpus:
unique_words_in_corpus[word]=corpus_id
count+=1
stopped_unique_words_in_corpus={}
for word in unique_words_in_corpus:
if word not in stopwords:
stopped_unique_words_in_corpus[word]=unique_words_in_corpus[word]
word_id_mapping[word]=cnt
cnt+=1
#print unique_words_in_corpus
#print stopped_unique_words_in_corpus
#print word_id_mapping
feature_vect=[None]*len(corpus_data)
#score_vect=[None]*cnt
for i in range(0,len(corpus_data)):
score_vect=[0]*cnt
for word in stopped_unique_words_in_corpus:
if i==stopped_unique_words_in_corpus[word]:
#print word
score=tfidf_score(word,i,corpus_data)
#print score
score_vect[word_id_mapping[word]]=score
feature_vect[i]=score_vect
return feature_vect
def lda(corpus_data):
stopwords_path='/home/priyansh/Downloads/ltrc/extractor/'
stop_words_filename='stopwords.txt'
stopwords=[] #contain all stopwords
with codecs.open(stopwords_path+stop_words_filename,'r','utf-8') as fl:
for line in fl:
for word in line.split():
stopwords.append(word)
fl.close()
texts=[]
for data in corpus_data:
#print data
tokens=[]
temp=[]
stopped_tokens=[]
temp = data.split()
for word in temp:
tokens.append(word)
#print tokens
for i in tokens:
if i not in stopwords:
stopped_tokens.append(i)
stemmed_tokens=[]
for token in stopped_tokens:
stemmed_token = hi_stem(token)
stemmed_tokens.append(stemmed_token)
texts.append(stemmed_tokens)
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
num_topics=5
ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word = dictionary, passes=10)
doc_topics=[]
for doc_vector in corpus:
doc_topics.append(ldamodel[doc_vector])
for i in range(0,len(doc_topics)):
doc_topics[i] = sorted(doc_topics[i],key=itemgetter(1),reverse=True)
feature_vect=[]
for i in doc_topics:
prob_vect=[0]*num_topics
#print i
topic_num = i[0][0]
topic_prob = i[0][1]
prob_vect[topic_num]=topic_prob
feature_vect.append(prob_vect)
#print i
#print feature_vect
return feature_vect
my_featurizer = feature_extractor(tfidf)
X = my_featurizer.transform(corpus)
#X = sparse.csr_matrix(X)
X_train , X_test , y_train , y_test = train_test_split(X,y,test_size=0.2,random_state=42)
#pipe = make_pipeline(my_featurizer,svm.LinearSVC())
#pipe.fit(X_train,y_train)
#pred = pipe.predict(X_test)
clf = svm.SVC(kernel='linear')
clf.fit(X_train,y_train)
pred = clf.predict(X_test)
print "Expected output\n"
print y_test
print "\n"
print "Output\n"
print pred
print "\n"
score = clf.score(X_test,y_test)
print score
print "\n"
print metrics.confusion_matrix(pred,y_test)
| gpl-3.0 |
Nelca/buildMLSystem | ch02/figure4_5.py | 3 | 1999 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from knn import learn_model, apply_model, accuracy
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
    'asymmetry coefficient',
'length of kernel groove',
]
def train_plot(features, labels):
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 100)
Y = np.linspace(y0, y1, 100)
X, Y = np.meshgrid(X, Y)
model = learn_model(1, features[:, (0, 2)], np.array(labels))
C = apply_model(
np.vstack([X.ravel(), Y.ravel()]).T, model).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .6, .6), (.6, 1., .6), (.6, .6, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
plt.xlim(x0, x1)
plt.ylim(y0, y1)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[2])
plt.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.0, 1., .0), (.0, .0, 1.)])
plt.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
plt.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.))
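# Note (added for clarity): train_plot fits a 1-NN model on two of the seven features,
# evaluates it on a 100x100 mesh grid (ravel -> predict -> reshape), and paints the
# resulting decision regions with pcolormesh before overlaying the training points.
# The figure is produced twice below -- on the raw features and again after z-scoring --
# to show how feature scaling changes the nearest-neighbour boundary.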
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
train_plot(features, labels)
plt.savefig('../1400_02_04.png')
features -= features.mean(0)
features /= features.std(0)
train_plot(features, labels)
plt.savefig('../1400_02_05.png')
| mit |
r-owen/stui | TUI/Inst/GuideMonitor/GuideMonitorWindow.py | 1 | 12690 | #!/usr/bin/env python
"""Seeing monitor
History:
2010-10-01 ROwen Initial version.
2010-11-17 ROwen Added measured and applied offsets for all guider corrections.
Split RA, Dec and rotator into separate graphs.
Added net rotator offset.
2010-11-19 ROwen Display scaleFac as "percent": (scaleFac - 1) * 100
2010-11-22 ROwen Changed Scale scaling from 1e2 to 1e6.
2010-12-10 ROwen Reduced the memory leak by increasing updateInterval from its default value of 0.9 sec
to 10 seconds. Return to the default value again once the matplotlib bug is fixed.
2011-01-03 ROwen Modified to use new version of StripChartWdg.
Added measured FWHM to the seeing plot.
Added preliminary display of measured FWHM of each in-focus probe (no labelling).
2011-01-18 ROwen Net values are shown as steps, since that properly reflects reality.
2012-06-04 ROwen Fix clear button.
2013-03-21 ROwen Modified to use guider keyword gprobeBits instead of synthetic keyword fullGProbeBits
now that ticket #433 is fixed!
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import matplotlib
import RO.CnvUtil
import RO.PhysConst
import RO.Wdg
import TUI.Base.StripChartWdg
import TUI.Models
WindowName = "Inst.Guide Monitor"
def addWindow(tlSet):
"""Create the window for TUI.
"""
tlSet.createToplevel(
name = WindowName,
defGeom = "+434+22",
visible = False,
resizable = True,
wdgFunc = GuideMonitorWdg,
)
class GuideMonitorWdg(Tkinter.Frame):
"""Monitor guide corrections
"""
def __init__(self, master, timeRange=1800, width=9, height=9):
"""Create a GuideMonitorWdg
Inputs:
- master: parent Tk widget
- timeRange: range of time displayed (seconds)
- width: width of plot (inches)
- height: height of plot (inches)
"""
Tkinter.Frame.__init__(self, master)
self.tccModel = TUI.Models.getModel("tcc")
self.guiderModel = TUI.Models.getModel("guider")
self.probeInfoDict = dict() # dict of probe number (starting from 1): ProbeInfo
self.stripChartWdg = TUI.Base.StripChartWdg.StripChartWdg(
master = self,
timeRange = timeRange,
updateInterval = 10,
numSubplots = 6,
width = width,
height = height,
cnvTimeFunc = TUI.Base.StripChartWdg.TimeConverter(useUTC=True),
)
self.stripChartWdg.grid(row=0, column=0, sticky="nwes")
self.grid_rowconfigure(0, weight=1)
self.grid_columnconfigure(0, weight=1)
# the default ticks are not nice, so be explicit
self.stripChartWdg.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 61, 5)))
subplotInd = 0
# RA/Dec arc offset subplot
def arcsecFromPVT(val):
return 3600.0 * RO.CnvUtil.posFromPVT(val)
self.stripChartWdg.plotKeyVar(
label="RA net offset",
subplotInd=subplotInd,
keyVar=self.tccModel.objArcOff,
keyInd=0,
func=arcsecFromPVT,
color="blue",
drawstyle="steps-post",
)
self.stripChartWdg.plotKeyVar(
label="RA measured err",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisError,
keyInd=0,
color="gray",
)
self.stripChartWdg.plotKeyVar(
label="RA applied corr",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisChange,
keyInd=0,
color="green",
)
self.stripChartWdg.showY(-0.5, 0.5, subplotInd=subplotInd)
self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("RA Arc Off (\")")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
self.stripChartWdg.plotKeyVar(
label="Dec net offset",
subplotInd=subplotInd,
keyVar=self.tccModel.objArcOff,
keyInd=1,
func=arcsecFromPVT,
color="blue",
drawstyle="steps-post",
)
self.stripChartWdg.plotKeyVar(
label="Dec measured err",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisError,
keyInd=1,
color="gray",
)
self.stripChartWdg.plotKeyVar(
label="Dec applied corr",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisChange,
keyInd=1,
color="green",
)
self.stripChartWdg.showY(-0.5, 0.5, subplotInd=subplotInd)
self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Dec Arc Off (\")")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
# rotator offset subplot
self.stripChartWdg.plotKeyVar(
label="Rot net offset",
subplotInd=subplotInd,
keyVar=self.tccModel.guideOff,
keyInd=2,
func=arcsecFromPVT,
color="blue",
drawstyle="steps-post",
)
self.stripChartWdg.plotKeyVar(
label="Rot measured err",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisError,
keyInd=2,
color="gray",
)
self.stripChartWdg.plotKeyVar(
label="Rot applied corr",
subplotInd=subplotInd,
keyVar=self.guiderModel.axisChange,
keyInd=2,
color="green",
)
self.stripChartWdg.showY(-2.0, 2.0, subplotInd=subplotInd)
self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Rot Off (\")")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
# seeing subplot
self.seeingSubplotInd = subplotInd
self.stripChartWdg.plotKeyVar(
label="Measured",
subplotInd=subplotInd,
keyVar=self.guiderModel.fwhm,
keyInd=1,
color="blue",
)
self.stripChartWdg.plotKeyVar(
label="Theoretical",
subplotInd=subplotInd,
keyVar=self.guiderModel.seeing,
keyInd=0,
color="green",
)
self.stripChartWdg.showY(1.0, 1.2, subplotInd=subplotInd)
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Seeing (\")")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
# focus subplot
self.stripChartWdg.plotKeyVar(
label="Focus net offset",
subplotInd=subplotInd,
keyVar=self.tccModel.secFocus,
color="blue",
drawstyle="steps-post",
)
self.stripChartWdg.plotKeyVar(
label="Focus measured err",
subplotInd=subplotInd,
keyVar=self.guiderModel.focusError,
color="gray",
)
self.stripChartWdg.plotKeyVar(
label="Focus applied corr",
subplotInd=subplotInd,
keyVar=self.guiderModel.focusChange,
color="green",
)
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Focus (um)")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
# scale subplot
def cnvAbsScale(val):
return (val - 1.0) * 1.0e6
def cnvDeltaScale(val):
return val * 1.0e6
self.stripChartWdg.plotKeyVar(
label="Scale net",
subplotInd=subplotInd,
keyVar=self.tccModel.scaleFac,
func=cnvAbsScale,
color="blue",
drawstyle="steps-post",
)
self.stripChartWdg.plotKeyVar(
label="Scale measured err",
subplotInd=subplotInd,
func=cnvDeltaScale,
keyVar=self.guiderModel.scaleError,
color="gray",
)
self.stripChartWdg.plotKeyVar(
label="Scale applied corr",
subplotInd=subplotInd,
func=cnvDeltaScale,
keyVar=self.guiderModel.scaleChange,
color="green",
)
self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Scale 1e6")
self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
subplotInd += 1
self.guiderModel.probe.addCallback(self.probeCallback)
self.clearWdg = RO.Wdg.Button(master = self, text = "C", callFunc = self.clearCharts)
self.clearWdg.grid(row=0, column=0, sticky = "sw")
def cartridgeLoadedCallback(self, keyVar):
"""guider.cartridgeLoaded keyvar callback
When seen ditch all guide-probe-specific lines
"""
self.clearProbeInfo()
def clearCharts(self, wdg=None):
"""Clear all strip charts
"""
self.stripChartWdg.clear()
def clearProbeInfo(self):
"""Clear self.probeInfoDict and remove associated lines from plots
"""
for probeInfo in self.probeInfoDict.itervalues():
probeInfo.remove()
self.probeInfoDict = dict()
def probeCallback(self, keyVar):
"""guider.probe callback
If guide probe is broken, unused or out of focus do nothing. Otherwise:
- If probeInfo does not exist, create it and the associated plot line
- Plot data. If probe is disabled then plot "nan" so that no point shows
and lines remain broken if the probe is re-enabled later.
"""
# print "%s.probeCallback(%s)" % (self, keyVar)
if (not keyVar.isCurrent) or (not keyVar.isGenuine) or (keyVar[1] is None):
return
if (not self.guiderModel.gprobeBits.isCurrent) or (self.guiderModel.gprobeBits[0] is None):
return
probeNum = keyVar[1]
if self.guiderModel.gprobeBits[probeNum - 1] & 3 > 0:
# broken or unused
return
if abs(keyVar[6]) > 50:
# not an in-focus probe
return
probeInfo = self.probeInfoDict.get(probeNum)
if probeInfo is None:
probeInfo = ProbeInfo(num=probeNum, guideMonitorWdg=self)
self.probeInfoDict[probeNum] = probeInfo
probeInfo.plotData(keyVar)
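    # Note (added; bit meanings inferred from the masks used here and in ProbeInfo):
    # gprobeBits packs per-probe status flags, so
    #     gprobeBits[i] & 3 > 0  ->  probe i is broken or unused (ignored entirely)
    #     gprobeBits[i] & 7 > 0  ->  probe i is broken, unused or disabled (plotted as NaN)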
class ProbeInfo(object):
def __init__(self, num, guideMonitorWdg):
"""Information about a guide probe, including lines on the strip chart
"""
self.num = int(num)
self.guiderModel = guideMonitorWdg.guiderModel
self.stripChartWdg = guideMonitorWdg.stripChartWdg
self.fwhmLine = self.stripChartWdg.addLine(
subplotInd=guideMonitorWdg.seeingSubplotInd,
color = "blue",
linestyle = "",
marker = ",",
)
def plotData(self, keyVar):
"""guider.probe callback
Plot data. If probe is disabled then plot "nan" so that no point shows
        and lines remain broken if the probe is re-enabled later.
"""
# print "%s.probeCallback(%s)" % (self, keyVar)
if self.guiderModel.gprobeBits[self.num - 1] & 7 > 0:
# broken, unused or disabled; testing broken or unused is paranoia
# since this object should never have been created, but costs no extra time
# print "%s.plotData(%s); plot NaN" % (self, keyVar)
self.fwhmLine.addPoint(float("nan"))
else:
# print "%s.plotData(%s); plot %s" % (self, keyVar, keyVar[5])
self.fwhmLine.addPoint(keyVar[5])
def remove(self):
"""Remove all associated plot lines
"""
self.stripChartWdg.remove(self.fwhmLine)
def __str__(self):
return "ProbeInfo(%s)" % (self.num,)
if __name__ == "__main__":
import TestData
addWindow(TestData.tuiModel.tlSet)
TestData.tuiModel.tlSet.makeVisible(WindowName)
TestData.runTest()
TestData.tuiModel.reactor.run()
| bsd-3-clause |
btabibian/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
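# The two toy corpora above are deliberately tiny and (mostly) disjoint in vocabulary;
# the pipeline and grid-search tests below treat them as a binary classification task
# that every candidate model is expected to separate perfectly.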
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v1 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_false(hasattr(t2, "idf_"))
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params = '_invalid_analyzer_type_'
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
rng = np.random.RandomState(0)
vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'])
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
def test_vectorizer_string_object_as_input():
message = ("Iterable over raw text documents expected, "
"string object received.")
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
assert_raise_message(
ValueError, message, vec.fit_transform, "hello world!")
assert_raise_message(
ValueError, message, vec.fit, "hello world!")
assert_raise_message(
ValueError, message, vec.transform, "hello world!")
| bsd-3-clause |
jundongl/PyFeaST | skfeature/example/test_CIFE.py | 3 | 1485 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import CIFE
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx,_,_ = CIFE.cife(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
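# Note (added): CIFE feature selection is re-run inside every fold on the training split
# only, and the held-out split is scored on those selected features, so the test labels
# never influence the selection step; the printed number is the accuracy averaged over
# the 10 folds.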
| gpl-2.0 |
Sealos/Sarcasm | Proyecto Final/prueba.py | 1 | 2337 | import numpy as np
from sklearn import datasets, svm
"""
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
np.unique(iris_y)
np.random.seed(0)
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]
svc = svm.SVC(kernel='rbf')
svc.fit(iris_X_train, iris_y_train)
print svc.predict(iris_X)
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
print iris.data[1,1]
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
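# --- Hedged refactor sketch (appended; not used by the script above) ---
# The meshgrid -> predict -> reshape -> contourf pattern from the loop above,
# wrapped in a small helper so it can be reused for any fitted two-feature
# classifier. The helper name and `step` default are made up here.
def plot_decision_surface(clf, X, y, step=0.02, ax=None):
    if ax is None:
        ax = plt.gca()
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    return ax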
| gpl-2.0 |
tychon/physics | physics/datacursors.py | 1 | 6645 | # This module offers two Cursors:
# * DataCursor,
# where you have to click the data point, and
# * FollowDotCursor,
# where the bubble is always on the point
# nearest to your pointer.
#
# All the code was copied from
# http://stackoverflow.com/a/13306887
# DataCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# scat = ax.scatter(x, y)
# DataCursor(scat, x, y)
# plt.show()
# FollowDotCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# ax.scatter(x, y)
# cursor = FollowDotCursor(ax, x, y)
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial as spatial
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
def fmt(x, y):
return 'x: {x:0.2f}\ny: {y:0.2f}'.format(x=x, y=y)
# http://stackoverflow.com/a/4674445/190597
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
def __init__(self, artists, x = [], y = [], tolerance = 5, offsets = (-20, 20),
formatter = fmt, display_all = False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"formatter" is a callback function which takes 2 numeric arguments and
returns a string
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self._points = np.column_stack((x,y))
self.formatter = formatter
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.formatter, xy = (0, 0), ha = 'right',
xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def snap(self, x, y):
"""Return the value in self._points closest to (x, y).
"""
idx = np.nanargmin(((self._points - (x,y))**2).sum(axis = -1))
return self._points[idx]
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
annotation = self.annotations[event.artist.axes]
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
x, y = self.snap(x, y)
annotation.xy = x, y
annotation.set_text(self.formatter(x, y))
annotation.set_visible(True)
event.canvas.draw()
class FollowDotCursor(object):
"""Display the x,y location of the nearest data point."""
def __init__(self, ax, x, y, tolerance=5, formatter=fmt, offsets=(-20, 20)):
try:
x = np.asarray(x, dtype='float')
except (TypeError, ValueError):
x = np.asarray(mdates.date2num(x), dtype='float')
y = np.asarray(y, dtype='float')
self._points = np.column_stack((x, y))
self.offsets = offsets
self.scale = x.ptp()
self.scale = y.ptp() / self.scale if self.scale else 1
self.tree = spatial.cKDTree(self.scaled(self._points))
self.formatter = formatter
self.tolerance = tolerance
self.ax = ax
self.fig = ax.figure
self.ax.xaxis.set_label_position('top')
self.dot = ax.scatter(
[x.min()], [y.min()], s=130, color='green', alpha=0.7)
self.annotation = self.setup_annotation()
plt.connect('motion_notify_event', self)
def scaled(self, points):
points = np.asarray(points)
return points * (self.scale, 1)
def __call__(self, event):
ax = self.ax
# event.inaxes is always the current axis. If you use twinx, ax could be
# a different axis.
if event.inaxes == ax:
x, y = event.xdata, event.ydata
elif event.inaxes is None:
return
else:
inv = ax.transData.inverted()
x, y = inv.transform([(event.x, event.y)]).ravel()
annotation = self.annotation
x, y = self.snap(x, y)
annotation.xy = x, y
annotation.set_text(self.formatter(x, y))
self.dot.set_offsets((x, y))
bbox = ax.viewLim
event.canvas.draw()
def setup_annotation(self):
"""Draw and hide the annotation box."""
annotation = self.ax.annotate(
'', xy=(0, 0), ha = 'right',
xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
bbox = dict(
boxstyle='round,pad=0.5', fc='yellow', alpha=0.75),
arrowprops = dict(
arrowstyle='->', connectionstyle='arc3,rad=0'))
return annotation
def snap(self, x, y):
"""Return the value in self.tree closest to x, y."""
dist, idx = self.tree.query(self.scaled((x, y)), k=1, p=1)
try:
return self._points[idx]
except IndexError:
# IndexError: index out of bounds
return self._points[0]
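# --- Hedged usage sketch (appended) ---
# Both cursors accept a custom `formatter` callback, which the examples at the
# top of the module do not show. The label text below is made up.
def time_value_fmt(x, y):
    return 'time: {x:0.2f} s\nvalue: {y:0.2f}'.format(x=x, y=y)

# fig, ax = plt.subplots()
# ax.plot([0, 1, 2, 3, 4], [1, 4, 2, 5, 3], 'o')
# cursor = FollowDotCursor(ax, [0, 1, 2, 3, 4], [1, 4, 2, 5, 3],
#                          formatter=time_value_fmt)
# plt.show()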
| mit |
yl565/statsmodels | statsmodels/tsa/vector_ar/dynamic.py | 4 | 10037 | # pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
TODO
Returns
-------
**Attributes**:
coefs : Panel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
            # TODO: Remove in favor of statsmodels implementation
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as Panel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.Panel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
return pd.DataFrame(data)
def forecast(self, steps=1):
"""
Produce dynamic forecast
Parameters
----------
        steps : int
            Number of periods ahead to forecast.
Returns
-------
forecasts : pandas.DataFrame
"""
output = np.empty((self.T - steps, self.neqs))
y_values = self.y.values
y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))
result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))
coefs = self._coefs_raw
intercepts = self._intercepts_raw
# can only produce this many forecasts
forc_index = self.result_index[steps:]
for i, date in enumerate(forc_index):
# TODO: check that this does the right thing in weird cases...
idx = y_index_map[date] - steps
result_idx = result_index_map[date] - steps
y_slice = y_values[:idx]
forcs = _model.forecast(y_slice, coefs[result_idx],
intercepts[result_idx], steps)
output[i] = forcs[-1]
return pd.DataFrame(output, index=forc_index, columns=self.names)
def plot_forecast(self, steps=1, figsize=(10, 10)):
"""
Plot h-step ahead forecasts against actual realizations of time
series. Note that forecasts are lined up with their respective
realizations.
Parameters
----------
        steps : int
            Number of periods ahead to forecast.
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,
sharex=True)
forc = self.forecast(steps=steps)
dates = forc.index
y_overlay = self.y.reindex(dates)
for i, col in enumerate(forc.columns):
ax = axes[i]
y_ts = y_overlay[col]
forc_ts = forc[col]
y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)
forc_handle = ax.plot(dates, forc_ts.values, 'k-')
lines = (y_handle[0], forc_handle[0])
labels = ('Y', 'Forecast')
            fig.legend(lines, labels)
fig.autofmt_xdate()
fig.suptitle('Dynamic %d-step forecast' % steps)
# pretty things up a bit
plotting.adjust_subplots(bottom=0.15, left=0.10)
plt.draw_if_interactive()
@property
def _is_rolling(self):
return self._window_type == ROLLING
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
data = dict((eq, r.r2) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
class DynamicPanelVAR(DynamicVAR):
"""
Dynamic (time-varying) panel vector autoregression using panel ordinary
least squares
Parameters
----------
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.neqs = len(data.columns)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _filter_data(lhs, rhs):
"""
Data filtering routine for dynamic VAR
lhs : DataFrame
original data
rhs : DataFrame
lagged variables
Returns
-------
"""
def _has_all_columns(df):
return np.isfinite(df.values).sum(1) == len(df.columns)
rhs_valid = _has_all_columns(rhs)
if not rhs_valid.all():
pre_filtered_rhs = rhs[rhs_valid]
else:
pre_filtered_rhs = rhs
index = lhs.index.union(rhs.index)
if not index.equals(rhs.index) or not index.equals(lhs.index):
rhs = rhs.reindex(index)
lhs = lhs.reindex(index)
rhs_valid = _has_all_columns(rhs)
lhs_valid = _has_all_columns(lhs)
valid = rhs_valid & lhs_valid
if not valid.all():
filt_index = rhs.index[valid]
filtered_rhs = rhs.reindex(filt_index)
filtered_lhs = lhs.reindex(filt_index)
else:
filtered_rhs, filtered_lhs = rhs, lhs
return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
def _make_lag_matrix(x, lags):
data = {}
columns = []
for i in range(1, 1 + lags):
lagstr = 'L%d.'% i
lag = x.shift(i).rename(columns=lambda c: lagstr + c)
data.update(lag._series)
columns.extend(lag.columns)
return pd.DataFrame(data, columns=columns)
class Equation(object):
"""
Stub, estimate one equation
"""
def __init__(self, y, x):
pass
if __name__ == '__main__':
import pandas.util.testing as ptest
ptest.N = 500
data = ptest.makeTimeDataFrame().cumsum(0)
var = DynamicVAR(data, lag_order=2, window_type='expanding')
var2 = DynamicVAR(data, lag_order=2, window=10,
window_type='rolling')
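    # --- Hedged usage sketch (appended): exercise the public API defined
    # above on the synthetic data. Like the rest of this module it relies on
    # the legacy pandas.ols backend, so it only runs on the old pandas
    # versions this file targets.
    forecasts = var.forecast(steps=2)   # DataFrame of 2-step-ahead forecasts
    print(forecasts.tail())
    print(var.r2.tail())                # per-equation R^2 through time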
| bsd-3-clause |
imaculate/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 55 | 7386 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two South American
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.org/basemap>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
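# --- Hedged standalone sketch (appended; not part of the original example) ---
# The core modelling step above, stripped of the dataset download and the
# geographic plumbing: standardize the presence-only samples, fit a
# OneClassSVM, and use decision_function() as a relative suitability score.
# The data below are synthetic.
def _one_class_density_sketch():
    rng = np.random.RandomState(0)
    presence = rng.normal(loc=[2.0, -1.0], scale=0.5, size=(200, 2))
    mean, std = presence.mean(axis=0), presence.std(axis=0)
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit((presence - mean) / std)
    query = rng.uniform(-4, 4, size=(5, 2))
    return clf.decision_function((query - mean) / std)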
| bsd-3-clause |
umuzungu/zipline | zipline/data/us_equity_loader.py | 3 | 11226 | # Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
from cachetools import LRUCache
from numpy import around, hstack
from pandas.tslib import normalize_date
from six import with_metaclass
from zipline.lib._float64window import AdjustedArrayWindow as Float64Window
from zipline.lib.adjustment import Float64Multiply
from zipline.utils.cache import ExpiringCache
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import float64_dtype
class SlidingWindow(object):
"""
Wrapper around an AdjustedArrayWindow which supports monotonically
increasing (by datetime) requests for a sized window of data.
Parameters
----------
window : AdjustedArrayWindow
Window of pricing data with prefetched values beyond the current
simulation dt.
    size : int
        Number of rows in each window returned by ``get``.
    cal_start : int
        Index in the overall calendar at which the window starts.
    offset : int
        Additional offset applied when seeking within the prefetched window.
    """
def __init__(self, window, size, cal_start, offset):
self.window = window
self.cal_start = cal_start
self.current = around(next(window), 3)
self.offset = offset
self.most_recent_ix = self.cal_start + size
def get(self, end_ix):
"""
Returns
-------
out : A np.ndarray of the equity pricing up to end_ix after adjustments
and rounding have been applied.
"""
if self.most_recent_ix == end_ix:
return self.current
target = end_ix - self.cal_start - self.offset + 1
self.current = around(self.window.seek(target), 3)
self.most_recent_ix = end_ix
return self.current
class USEquityHistoryLoader(with_metaclass(ABCMeta)):
"""
Loader for sliding history windows of adjusted US Equity Pricing data.
Parameters
----------
reader : DailyBarReader, MinuteBarReader
Reader for pricing bars.
adjustment_reader : SQLiteAdjustmentReader
Reader for adjustment data.
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
def __init__(self, env, reader, adjustment_reader, sid_cache_size=1000):
self.env = env
self._reader = reader
self._adjustments_reader = adjustment_reader
self._window_blocks = {
field: ExpiringCache(LRUCache(maxsize=sid_cache_size))
for field in self.FIELDS
}
@abstractproperty
def _prefetch_length(self):
pass
@abstractproperty
def _calendar(self):
pass
@abstractmethod
def _array(self, start, end, assets, field):
pass
def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
        For the use of AdjustedArrayWindow in the loader, which looks back
        from the current simulation time to a window of data, the dictionary
        is structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
            The asset for which to get adjustments.
        dts : iterable of datetime64-like
The days for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[end_loc].append(mult)
except KeyError:
adjs[end_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[end_loc].append(mult)
except KeyError:
adjs[end_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[end_loc].append(mult)
except KeyError:
adjs[end_loc] = [mult]
return adjs
def _ensure_sliding_windows(self, assets, dts, field):
"""
        Ensure that there is a Float64Window-backed sliding window for each
        asset that can provide data for the given parameters.
If the corresponding window for the (assets, len(dts), field) does not
exist, then create a new one.
If a corresponding window does exist for (assets, len(dts), field), but
can not provide data for the current dts range, then create a new
one and replace the expired window.
Parameters
----------
assets : iterable of Assets
The assets in the window
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
Returns
-------
        out : list of SlidingWindow with sufficient data so that each asset's
window can provide `get` for the index corresponding with the last
value in `dts`
"""
end = dts[-1]
size = len(dts)
asset_windows = {}
needed_assets = []
for asset in assets:
try:
asset_windows[asset] = self._window_blocks[field].get(
(asset, size), end)
except KeyError:
needed_assets.append(asset)
if needed_assets:
start = dts[0]
offset = 0
start_ix = self._calendar.get_loc(start)
end_ix = self._calendar.get_loc(end)
cal = self._calendar
prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)
prefetch_end = cal[prefetch_end_ix]
prefetch_dts = cal[start_ix:prefetch_end_ix + 1]
prefetch_len = len(prefetch_dts)
array = self._array(prefetch_dts, needed_assets, field)
view_kwargs = {}
if field == 'volume':
array = array.astype(float64_dtype)
for i, asset in enumerate(needed_assets):
if self._adjustments_reader:
adjs = self._get_adjustments_in_range(
asset, prefetch_dts, field)
else:
adjs = {}
window = Float64Window(
array[:, i].reshape(prefetch_len, 1),
view_kwargs,
adjs,
offset,
size
)
sliding_window = SlidingWindow(window, size, start_ix, offset)
asset_windows[asset] = sliding_window
self._window_blocks[field].set((asset, size),
sliding_window,
prefetch_end)
return [asset_windows[asset] for asset in assets]
def history(self, assets, dts, field):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Makes an assumption that all dts are present and contiguous,
in the calendar.
field : str
The OHLCV field for which to retrieve data.
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets, dts, field)
end_ix = self._calendar.get_loc(dts[-1])
return hstack([window.get(end_ix) for window in block])
class USEquityDailyHistoryLoader(USEquityHistoryLoader):
@property
def _prefetch_length(self):
return 40
@property
def _calendar(self):
return self._reader._calendar
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
class USEquityMinuteHistoryLoader(USEquityHistoryLoader):
@property
def _prefetch_length(self):
return 1560
@lazyval
def _calendar(self):
mm = self.env.market_minutes
return mm[mm.slice_indexer(start=self._reader.first_trading_day,
end=self._reader.last_available_dt)]
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
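# --- Hedged illustration (appended; pure NumPy, independent of the classes
# above) of the adjustment convention documented in
# USEquityHistoryLoader._get_adjustments_in_range: each multiply spans rows
# [0, end_loc - 1], i.e. every day strictly before the event is adjusted when
# the window is viewed from a day at or after the event. The helper name and
# toy prices are made up.
def _split_adjusted_window_sketch(prices, split_loc, split_ratio):
    import numpy as np
    adjusted = np.array(prices, dtype=float)
    adjusted[:split_loc] *= split_ratio  # rows 0 .. end_loc - 1
    return adjusted

# e.g. a 2-for-1 split at calendar location 3 halves the first three closes:
# _split_adjusted_window_sketch([10., 10., 10., 5., 5.], 3, 0.5)
# -> array([ 5.,  5.,  5.,  5.,  5.])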
| apache-2.0 |
cdegroc/scikit-learn | sklearn/datasets/lfw.py | 6 | 16362 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: Simplified BSD
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warn("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warn("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
        raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
    # iterate over the collected file paths to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=None, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
Parameters
----------
data_home: optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize the each face picture.
min_faces_per_person: int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split('\t') for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
person_folder = join(data_folder_path, name)
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
    The task is called Face Verification: given a pair of pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
Parameters
----------
subset: optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home: optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled: boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize: float, optional, default 0.5
Ratio used to resize the each face picture.
color: boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_: optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
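# --- Hedged usage sketch (appended; the first call downloads the LFW archive,
# roughly 200MB, so it needs network access and some patience) ---
def _lfw_usage_sketch():
    people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    # data: (n_samples, h * w) ravelled gray-level faces scaled to [0, 1]
    # images: (n_samples, h, w); target: integer indices into target_names
    return people.data.shape, people.target_names

def _lfw_pairs_usage_sketch():
    pairs = fetch_lfw_pairs(subset='train')
    # pairs.pairs has shape (n_pairs, 2, h, w); target is 1 for "same person"
    return pairs.pairs.shape, pairs.target_names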
| bsd-3-clause |
sposs/DIRAC | Core/Utilities/Graphs/Legend.py | 11 | 7713 | ########################################################################
# $HeadURL$
########################################################################
""" Legend encapsulates a graphical plot legend drawing tool
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
from DIRAC.Core.Utilities.Graphs.Palette import Palette
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import types
class Legend:
def __init__(self,data=None,axes=None,*aw,**kw):
self.labels = {}
if type(data) == types.DictType:
for label,ddict in data.items():
#self.labels[label] = pretty_float(max([ float(x) for x in ddict.values() if x ]) )
self.labels[label] = "%.1f" % max([ float(x) for x in ddict.values() if x ])
elif type(data) == types.InstanceType and data.__class__ == GraphData:
self.labels = data.getLabels()
else:
self.labels = data
#self.labels.reverse()
self.ax = axes
self.canvas = None
if self.ax:
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
self.prefs = evalPrefs(*aw,**kw)
self.palette = Palette()
if self.labels and self.labels[0][0] != 'NoLabels':
percent_flag = self.prefs.get('legend_unit','')
if percent_flag == "%":
sum_value = sum(data.label_values)
if sum_value > 0.:
self.labels = [(l,v/sum_value*100.) for l,v in self.labels ]
self.__get_column_width()
def dumpPrefs(self):
for key in self.prefs:
print key.rjust(20),':',str(self.prefs[key]).ljust(40)
def setLabels(self,labels):
self.labels = labels
def setAxes(self,axes):
self.ax = axes
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
def getLegendSize(self):
self.__get_column_width()
legend_position = self.prefs['legend_position']
legend_width = float(self.prefs['legend_width'])
legend_height = float(self.prefs['legend_height'])
legend_padding = float(self.prefs['legend_padding'])
legend_text_size = self.prefs.get('legend_text_size',self.prefs['text_size'])
legend_text_padding = self.prefs.get('legend_text_padding',self.prefs['text_padding'])
if legend_position in ['right','left']:
# One column in case of vertical legend
legend_width = self.column_width+legend_padding
nLabels = len(self.labels)
legend_max_height = nLabels*(legend_text_size+legend_text_padding)
elif legend_position == 'bottom':
nColumns = min(self.prefs['legend_max_columns'],int(legend_width/self.column_width))
nLabels = len(self.labels)
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(legend_height/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
text_padding = self.prefs['text_padding']
text_padding = pixelToPoint(text_padding,self.prefs['dpi'])
legend_height = min(legend_height,(nRows*(self.text_size+text_padding)+text_padding))
legend_max_height = nLabels*(self.text_size+text_padding)
return legend_width,legend_height,legend_max_height
def __get_legend_text_size(self):
dpi = self.prefs['dpi']
text_size = self.prefs['text_size']
text_padding = self.prefs['text_padding']
legend_text_size = self.prefs.get('legend_text_size',text_size)
legend_text_padding = self.prefs.get('legend_text_padding',text_padding)
return legend_text_size,legend_text_padding
def __get_column_width(self):
max_length = 0
max_column_text = ''
flag = self.prefs.get('legend_numbers',True)
unit = self.prefs.get('legend_unit',False)
for label,num in self.labels:
if not flag: num = None
if num is not None:
column_length = len(str(label)+str(num)) + 1
else:
column_length = len(str(label)) + 1
if column_length > max_length:
max_length = column_length
if flag:
if type(num) == types.IntType or type(num) == types.LongType:
numString = str(num)
else:
numString = "%.1f" % float(num)
max_column_text = '%s %s' % (str(label),numString)
if unit:
max_column_text += "%"
else:
max_column_text = '%s ' % str(label)
figure = Figure()
canvas = FigureCanvasAgg(figure)
dpi = self.prefs['dpi']
figure.set_dpi( dpi )
l_size,l_padding = self.__get_legend_text_size()
self.text_size = pixelToPoint(l_size,dpi)
text = Text(0.,0.,text=max_column_text,size=self.text_size)
text.set_figure(figure)
bbox = text.get_window_extent(canvas.get_renderer())
self.column_width = bbox.width+6*l_size
def draw(self):
dpi = self.prefs['dpi']
ax_xsize = self.ax.get_window_extent().width
ax_ysize = self.ax.get_window_extent().height
nLabels = len(self.labels)
nColumns = min(self.prefs['legend_max_columns'],int(ax_xsize/self.column_width))
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(ax_ysize/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
maxLabels = nColumns*nRows - 1
self.ax.set_xlim(0.,float(ax_xsize))
self.ax.set_ylim(-float(ax_ysize),0.)
legend_text_size,legend_text_padding = self.__get_legend_text_size()
legend_text_size_point = pixelToPoint(legend_text_size,dpi)
box_width = legend_text_size
legend_offset = (ax_xsize - nColumns*self.column_width)/2
nc = 0
#self.labels.reverse()
for label,num in self.labels:
num_flag = self.prefs.get('legend_numbers',True)
percent_flag = self.prefs.get('legend_unit','')
if num_flag:
if percent_flag == "%":
num = "%.1f" % num +'%'
else:
num = "%.1f" % num
else:
num = None
color = self.palette.getColor(label)
row = nc%nRows
column = nc/nRows
if row == nRows-1 and column == nColumns-1 and nc != nLabels-1:
last_text = '... plus %d more' % (nLabels-nc)
self.ax.text(float(column*self.column_width)+legend_offset,-float(row*1.6*box_width),
last_text,horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
break
else:
self.ax.text(float(column*self.column_width)+2.*box_width+legend_offset,-row*1.6*box_width,
str(label),horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
if num is not None:
self.ax.text(float((column+1)*self.column_width)-2*box_width+legend_offset,-float(row*1.6*box_width),
str(num),horizontalalignment='right',
verticalalignment='top',size=legend_text_size_point)
box = Rectangle((float(column*self.column_width)+legend_offset,-float(row*1.6*box_width)-box_width),
box_width,box_width)
box.set_ec('black')
box.set_linewidth(pixelToPoint(0.5,dpi))
box.set_fc(color)
self.ax.add_patch(box)
nc += 1
| gpl-3.0 |
selective-inference/selective-inference | doc/learning_examples/riboflavin/CV.py | 3 | 10083 | import functools, hashlib
import numpy as np
from scipy.stats import norm as normal_dbn
import regreg.api as rr
from selection.algorithms.debiased_lasso import pseudoinverse_debiasing_matrix
# load in the X matrix
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(hdi); data(riboflavin); X = riboflavin$x')
numpy2ri.activate()
X_full = np.asarray(rpy.r('X'))
numpy2ri.deactivate()
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit, repeat_selection, infer_set_target
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
from selection.learning.learners import mixture_learner
def highdim_model_inference(X,
y,
truth,
selection_algorithm,
sampler,
lam_min,
dispersion,
success_params=(1, 1),
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
alpha=0.1,
B=2000,
naive=True,
learner_klass=mixture_learner,
how_many=None):
n, p = X.shape
XTX = X.T.dot(X)
instance_hash = hashlib.md5()
instance_hash.update(X.tobytes())
instance_hash.update(y.tobytes())
instance_hash.update(truth.tobytes())
instance_id = instance_hash.hexdigest()
# run selection algorithm
observed_set = repeat_selection(selection_algorithm, sampler, *success_params)
observed_list = sorted(observed_set)
# observed debiased LASSO estimate
loss = rr.squared_error(X, y)
pen = rr.l1norm(p, lagrange=lam_min)
problem = rr.simple_problem(loss, pen)
soln = problem.solve()
grad = X.T.dot(X.dot(soln) - y) # gradient at beta_hat
M = pseudoinverse_debiasing_matrix(X,
observed_list)
observed_target = soln[observed_list] - M.dot(grad)
tmp = X.dot(M.T)
target_cov = tmp.T.dot(tmp) * dispersion
cross_cov = np.identity(p)[:,observed_list] * dispersion
if len(observed_list) > 0:
if how_many is None:
how_many = len(observed_list)
observed_list = observed_list[:how_many]
# find the target, based on the observed outcome
(pivots,
covered,
lengths,
pvalues,
lower,
upper) = [], [], [], [], [], []
targets = []
true_target = truth[observed_list]
results = infer_set_target(selection_algorithm,
observed_set,
observed_list,
sampler,
observed_target,
target_cov,
cross_cov,
hypothesis=true_target,
fit_probability=fit_probability,
fit_args=fit_args,
success_params=success_params,
alpha=alpha,
B=B,
learner_klass=learner_klass)
for i, result in enumerate(results):
(pivot,
interval,
pvalue,
_) = result
pvalues.append(pvalue)
pivots.append(pivot)
covered.append((interval[0] < true_target[i]) * (interval[1] > true_target[i]))
lengths.append(interval[1] - interval[0])
lower.append(interval[0])
upper.append(interval[1])
if len(pvalues) > 0:
df = pd.DataFrame({'pivot':pivots,
'pvalue':pvalues,
'coverage':covered,
'length':lengths,
'upper':upper,
'lower':lower,
'id':[instance_id]*len(pvalues),
'target':true_target,
'variable':observed_list,
'B':[B]*len(pvalues)})
if naive:
(naive_pvalues,
naive_pivots,
naive_covered,
naive_lengths,
naive_upper,
naive_lower) = [], [], [], [], [], []
for j, idx in enumerate(observed_list):
true_target = truth[idx]
target_sd = np.sqrt(target_cov[j, j])
observed_target_j = observed_target[j]
quantile = normal_dbn.ppf(1 - 0.5 * alpha)
naive_interval = (observed_target_j - quantile * target_sd,
observed_target_j + quantile * target_sd)
naive_upper.append(naive_interval[1])
naive_lower.append(naive_interval[0])
naive_pivot = (1 - normal_dbn.cdf((observed_target_j - true_target) / target_sd))
naive_pivot = 2 * min(naive_pivot, 1 - naive_pivot)
naive_pivots.append(naive_pivot)
naive_pvalue = (1 - normal_dbn.cdf(observed_target_j / target_sd))
naive_pvalue = 2 * min(naive_pvalue, 1 - naive_pvalue)
naive_pvalues.append(naive_pvalue)
naive_covered.append((naive_interval[0] < true_target) * (naive_interval[1] > true_target))
naive_lengths.append(naive_interval[1] - naive_interval[0])
naive_df = pd.DataFrame({'naive_pivot':naive_pivots,
'naive_pvalue':naive_pvalues,
'naive_coverage':naive_covered,
'naive_length':naive_lengths,
'naive_upper':naive_upper,
'naive_lower':naive_lower,
'variable':observed_list,
})
df = pd.merge(df, naive_df, on='variable')
return df
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):
# description of statistical problem
n, p = X_full.shape
if boot_design:
idx = np.random.choice(np.arange(n), n, replace=True)
X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
else:
X = X_full.copy()
X = X - np.mean(X, 0)[None, :]
X = X / np.std(X, 0)[None, :]
truth = np.zeros(p)
truth[:s] = np.linspace(signal[0], signal[1], s)
np.random.shuffle(truth)
truth *= sigma / np.sqrt(n)
y = X.dot(truth) + sigma * np.random.standard_normal(n)
lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
lam_min, lam_1se = n * lam_min, n * lam_1se
XTX = X.T.dot(X)
XTXi = np.linalg.inv(XTX)
resid = y - X.dot(XTXi.dot(X.T.dot(y)))
dispersion = sigma**2
S = X.T.dot(y)
covS = dispersion * X.T.dot(X)
splitting_sampler = split_sampler(X * y[:, None], covS)
def meta_algorithm(X, XTXi, resid, sampler):
S = sampler.center.copy()
ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
G = lasso_glmnet(X, ynew, *[None]*4)
select = G.select(seed=seed)
return set(list(select[0]))
selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
# run selection algorithm
df = highdim_model_inference(X,
y,
truth,
selection_algorithm,
splitting_sampler,
lam_min,
sigma**2, # dispersion assumed known for now
success_params=(1, 1),
B=B,
fit_probability=keras_fit,
fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
if df is not None:
liu_df = liu_inference(X,
y,
1.00001 * lam_min,
dispersion,
truth,
alpha=alpha,
approximate_inverse='BN')
return pd.merge(df, liu_df, on='variable')
if __name__ == "__main__":
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
    U = np.linspace(0, 1, 101)  # grid on [0, 1] used to evaluate the empirical CDFs of the pivots
plt.clf()
    init_seed = np.fabs(np.random.standard_normal() * 500)  # random offset so each run of the script explores different seeds
for i in range(500):
df = simulate(seed=init_seed+i)
csvfile = 'riboflavin_CV.csv'
outbase = csvfile[:-4]
if df is not None and i > 0:
try:
df = pd.concat([df, pd.read_csv(csvfile)])
except FileNotFoundError:
pass
df.to_csv(csvfile, index=False)
if len(df['pivot']) > 0:
pivot_ax, lengths_ax = pivot_plot(df, outbase)
liu_pivot = df['liu_pivot']
liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray', label='Liu CV',
linewidth=3)
pivot_ax.legend()
fig = pivot_ax.figure
fig.savefig(csvfile[:-4] + '.pdf')
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/numpy/lib/twodim_base.py | 34 | 25580 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
    >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| gpl-3.0 |
abhisheksugam/Climate_Police | Climate_Police/tests/plot_pollutants.py | 1 | 3256 |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
from plotly.graph_objs import *
import plotly.tools as tls
import seaborn as sns
import plotly
plotly.offline.init_notebook_mode()
def plot_pollutants(df, year, state):
    """Plot mean NO2, O3, SO2 and CO levels for the given year and state in `df`."""
    # split 'Date Local' into separate year, month and day columns
df["year"], df["month"], df["day"] = zip(*df["Date Local"].apply(lambda x: x.split('-', 2)))
    # average all numeric columns over a (year, State) multi-index
df2 = df.groupby(['year', 'State']).mean()
    # drop identifier columns that are meaningless as averages
del df2['State Code']
del df2['County Code']
del df2['Site Num']
del df2['Unnamed: 0']
    # select the rows for the requested year and state
df3 = df2.loc[year, state]
df4 = df3.round(4)
# plot all levels of pollutants per Year and State
trace1 = go.Scatter(
x=df4.index[0:4], y=df4[0:4],
mode = 'markers',
marker=dict(
size='16',
colorscale='Viridis',
showscale=False
),
line=Line(
color='#FFD700',
width=3
),
name='NO2'
)
trace2 = go.Scatter(
x=df4.index[4:8], y=df4[4:8],
mode = 'markers',
marker=dict(
size='16',
colorscale='Viridis',
showscale=False
),
line=Line(
color='#C0C0C0',
width=3
),
name='O3'
)
trace3 = go.Scatter(
x=df4.index[8:12], y=df4[8:12],
mode = 'markers',
marker=dict(
size='16',
colorscale='Viridis',
showscale=False
),
line=Line(
color='#BA8651',
width=3
),
name='SO2'
)
trace4 = go.Scatter(
x=df4.index[12:16], y=df4[12:16],
mode = 'markers',
marker=dict(
size='16',
colorscale='Viridis',
showscale=False
),
line=Line(
color='#000000',
width=4
),
name='CO'
)
data = Data([ trace1, trace2, trace3, trace4])
layout = Layout(
title='Levels of pollutants in ' + state + ". " + "Year: " + year,
updatemenus=list([
dict(
x=-0.05,
y=1,
yanchor='top',
buttons=list([
dict(
args=['visible', [True, True, True, True]],
label='All',
method='restyle'
),
dict(
args=['visible', [True, False, False, False]],
label='NO2',
method='restyle'
),
dict(
args=['visible', [False, True, False, False]],
label='O3',
method='restyle'
),
dict(
args=['visible', [False, False, True, False]],
label='SO2',
method='restyle'
),
dict(
args=['visible', [False, False, False, True]],
label='CO',
method='restyle'
)
]),
)
]),
)
fig = Figure(data=data, layout=layout)
py.iplot(fig)
    plotSuccessful = "Levels of pollutants plotted."
return fig, plotSuccessful
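if __name__ == "__main__":
    # Example usage (a sketch): the CSV file name and its columns are assumptions
    # based on the column names referenced above, not part of the original module.
    pollution = pd.read_csv("pollution_us_2000_2016.csv")
    figure, message = plot_pollutants(pollution, year="2010", state="California")
    print(message)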
| mit |
Tong-Chen/scikit-learn | examples/svm/plot_separating_hyperplane.py | 12 | 1252 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
| bsd-3-clause |
RomainBrault/scikit-learn | sklearn/grid_search.py | 5 | 40816 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. This module will be removed in 0.20.",
DeprecationWarning)
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterGrid` instead.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.ParameterSampler` instead.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :func:`sklearn.model_selection.fit_grid_point` instead.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
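    Examples
    --------
    A minimal sketch (shown as a plain snippet rather than a doctest; the toy data
    and parameter values are illustrative assumptions)::
        import numpy as np
        from sklearn.svm import SVC
        from sklearn.grid_search import fit_grid_point
        from sklearn.metrics.scorer import check_scoring
        X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
        y = np.array([0, 0, 1, 1])
        svc = SVC(kernel='linear')
        score, params, n_test = fit_grid_point(
            X, y, svc, {'C': 1.0}, train=np.arange(3), test=np.array([3]),
            scorer=check_scoring(svc), verbose=0)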
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
def _check_param_grid(param_grid):
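    # Validate that each grid maps parameter names to non-empty, one-dimensional sequences.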
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def classes_(self):
return self.best_estimator_.classes_
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# Out is a list of triplet: score, estimator, n_test_samples
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.GridSearchCV` instead.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
.. versionchanged:: 0.17
Upgraded to joblib 0.9.3.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape='ovr', degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
    n_jobs : int, default: 1
The maximum number of estimators fit in parallel.
- If -1 all CPUs are used.
- If 1 is given, no parallel computing code is used at all,
which is useful for debugging.
- For ``n_jobs`` below -1, ``(n_cpus + n_jobs + 1)`` are used.
For example, with ``n_jobs = -2`` all CPUs but one are used.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
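Examples
--------
A minimal usage sketch; the estimator, the sampled distribution and the
training arrays below are illustrative placeholders, not part of this
module::
    from scipy.stats import expon
    from sklearn.linear_model import SGDClassifier
    param_distributions = {'alpha': expon(scale=0.01)}
    search = RandomizedSearchCV(SGDClassifier(), param_distributions,
                                n_iter=20, random_state=0)
    search.fit(X_train, y_train)
    print(search.best_params_)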
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
| bsd-3-clause |
abhitopia/tensorflow | tensorflow/contrib/learn/python/learn/estimators/__init__.py | 17 | 12228 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need a custom model for binary
classification.
* `Estimator`: Use when you need a custom model for N-class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used, so the sections below
show which feature columns are fed to each estimator.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When the number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given an observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
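For example, a bare-bones regression `model_fn` might branch on `mode` like
this (an illustrative sketch; the feature shape handling and the optimizer
choice are assumptions, not requirements):
```python
def model_fn(features, targets, mode, params):
  # 1. Configure the model: a single linear layer.
  weights = tf.get_variable("weights", [params["n_features"], 1])
  predictions = tf.matmul(features, weights)
  # 2. Define the loss for training/evaluation.
  loss = tf.reduce_mean(tf.square(predictions - targets))
  # 3. Define the training operation only in TRAIN mode.
  train_op = None
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    train_op = tf.train.GradientDescentOptimizer(
        params["learning_rate"]).minimize(loss)
  # 4. Return predictions, loss and the training op.
  return predictions, loss, train_op
```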
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There is an additional estimator under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
| apache-2.0 |
eglxiang/Med | test_transformationOutput.py | 1 | 8775 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 13 12:10:33 2017
@author: apezeshk
"""
from matplotlib import pyplot as plt
import numpy as np
#Use this to display a single slice of the original/transformed patch in one figure,
#and every slice of the original and the transformed copies in another figure. This can be used
#to verify the images look right both in terms of the transformations as well as the cropping
#s.t. the nodule is centered in the patch.
#The last few lines can also be used to verify that the simple rotation/flipped augmentations
#look alright, and that they are all different from one another.
#nodOrig = np.load('/diskStation/LIDC/36368/pos_36368_test/p0451_20000101_s3000315_0.npy')
nodOld = np.load('/diskStation/LIDC/36368/pos_36368/p1012_20000101_s32231_0.npy').astype('int16')
nodTrans1 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m00.npy')
nodTrans2 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m01.npy')
nodTrans3 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m02.npy')
nodTrans4 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m03.npy')
nodTrans5 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m04.npy')
nodTrans6 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m05.npy')
nodTrans7 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m06.npy')
nodTrans8 = np.load('/diskStation/LIDC/36368/pos_aug_aux_36368/p1012_20000101_s32231_0_m07.npy')
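#A compact, loop-based helper that builds the same slice-by-slice comparison
#grids as the explicit subplot calls below (illustrative sketch; it is defined
#here but never called, so the original figures are unchanged):
def show_slice_grid(patches, n_slices=8):
    #One column per patch, one row per slice; assumes every patch has at
    #least n_slices slices along its third axis.
    plt.figure()
    n_cols = len(patches)
    for col, patch in enumerate(patches):
        for row in range(n_slices):
            plt.subplot(n_slices, n_cols, row * n_cols + col + 1)
            plt.imshow(patch[:, :, row], cmap='gray')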
i = 4; #slice number to be shown in the first figure
plt.figure();
plt.subplot(1,5,1); plt.imshow(nodOld[:,:,i], cmap = 'gray');
plt.subplot(1,5,2); plt.imshow(nodTrans1[:,:,i], cmap = 'gray');
plt.subplot(1,5,3); plt.imshow(nodTrans2[:,:,i], cmap = 'gray');
plt.subplot(1,5,4); plt.imshow(nodTrans3[:,:,i], cmap = 'gray');
plt.subplot(1,5,5); plt.imshow(nodTrans4[:,:,i], cmap = 'gray');
plt.figure()
plt.subplot(8,5,1); plt.imshow(nodOld[:,:,0], cmap = 'gray');
plt.subplot(8,5,6); plt.imshow(nodOld[:,:,1], cmap = 'gray');
plt.subplot(8,5,11); plt.imshow(nodOld[:,:,2], cmap = 'gray');
plt.subplot(8,5,16); plt.imshow(nodOld[:,:,3], cmap = 'gray');
plt.subplot(8,5,21); plt.imshow(nodOld[:,:,4], cmap = 'gray');
plt.subplot(8,5,26); plt.imshow(nodOld[:,:,5], cmap = 'gray');
plt.subplot(8,5,31); plt.imshow(nodOld[:,:,6], cmap = 'gray');
plt.subplot(8,5,36); plt.imshow(nodOld[:,:,7], cmap = 'gray');
plt.subplot(8,5,2); plt.imshow(nodTrans1[:,:,0], cmap = 'gray');
plt.subplot(8,5,7); plt.imshow(nodTrans1[:,:,1], cmap = 'gray');
plt.subplot(8,5,12); plt.imshow(nodTrans1[:,:,2], cmap = 'gray');
plt.subplot(8,5,17); plt.imshow(nodTrans1[:,:,3], cmap = 'gray');
plt.subplot(8,5,22); plt.imshow(nodTrans1[:,:,4], cmap = 'gray');
plt.subplot(8,5,27); plt.imshow(nodTrans1[:,:,5], cmap = 'gray');
plt.subplot(8,5,32); plt.imshow(nodTrans1[:,:,6], cmap = 'gray');
plt.subplot(8,5,37); plt.imshow(nodTrans1[:,:,7], cmap = 'gray');
plt.subplot(8,5,3); plt.imshow(nodTrans2[:,:,0], cmap = 'gray');
plt.subplot(8,5,8); plt.imshow(nodTrans2[:,:,1], cmap = 'gray');
plt.subplot(8,5,13); plt.imshow(nodTrans2[:,:,2], cmap = 'gray');
plt.subplot(8,5,18); plt.imshow(nodTrans2[:,:,3], cmap = 'gray');
plt.subplot(8,5,23); plt.imshow(nodTrans2[:,:,4], cmap = 'gray');
plt.subplot(8,5,28); plt.imshow(nodTrans2[:,:,5], cmap = 'gray');
plt.subplot(8,5,33); plt.imshow(nodTrans2[:,:,6], cmap = 'gray');
plt.subplot(8,5,38); plt.imshow(nodTrans2[:,:,7], cmap = 'gray');
plt.subplot(8,5,4); plt.imshow(nodTrans3[:,:,0], cmap = 'gray');
plt.subplot(8,5,9); plt.imshow(nodTrans3[:,:,1], cmap = 'gray');
plt.subplot(8,5,14); plt.imshow(nodTrans3[:,:,2], cmap = 'gray');
plt.subplot(8,5,19); plt.imshow(nodTrans3[:,:,3], cmap = 'gray');
plt.subplot(8,5,24); plt.imshow(nodTrans3[:,:,4], cmap = 'gray');
plt.subplot(8,5,29); plt.imshow(nodTrans3[:,:,5], cmap = 'gray');
plt.subplot(8,5,34); plt.imshow(nodTrans3[:,:,6], cmap = 'gray');
plt.subplot(8,5,39); plt.imshow(nodTrans3[:,:,7], cmap = 'gray');
plt.subplot(8,5,5); plt.imshow(nodTrans4[:,:,0], cmap = 'gray');
plt.subplot(8,5,10); plt.imshow(nodTrans4[:,:,1], cmap = 'gray');
plt.subplot(8,5,15); plt.imshow(nodTrans4[:,:,2], cmap = 'gray');
plt.subplot(8,5,20); plt.imshow(nodTrans4[:,:,3], cmap = 'gray');
plt.subplot(8,5,25); plt.imshow(nodTrans4[:,:,4], cmap = 'gray');
plt.subplot(8,5,30); plt.imshow(nodTrans4[:,:,5], cmap = 'gray');
plt.subplot(8,5,35); plt.imshow(nodTrans4[:,:,6], cmap = 'gray');
plt.subplot(8,5,40); plt.imshow(nodTrans4[:,:,7], cmap = 'gray');
plt.figure()
plt.subplot(8,5,1); plt.imshow(nodOld[:,:,0], cmap = 'gray');
plt.subplot(8,5,6); plt.imshow(nodOld[:,:,1], cmap = 'gray');
plt.subplot(8,5,11); plt.imshow(nodOld[:,:,2], cmap = 'gray');
plt.subplot(8,5,16); plt.imshow(nodOld[:,:,3], cmap = 'gray');
plt.subplot(8,5,21); plt.imshow(nodOld[:,:,4], cmap = 'gray');
plt.subplot(8,5,26); plt.imshow(nodOld[:,:,5], cmap = 'gray');
plt.subplot(8,5,31); plt.imshow(nodOld[:,:,6], cmap = 'gray');
plt.subplot(8,5,36); plt.imshow(nodOld[:,:,7], cmap = 'gray');
plt.subplot(8,5,2); plt.imshow(nodTrans5[:,:,0], cmap = 'gray');
plt.subplot(8,5,7); plt.imshow(nodTrans5[:,:,1], cmap = 'gray');
plt.subplot(8,5,12); plt.imshow(nodTrans5[:,:,2], cmap = 'gray');
plt.subplot(8,5,17); plt.imshow(nodTrans5[:,:,3], cmap = 'gray');
plt.subplot(8,5,22); plt.imshow(nodTrans5[:,:,4], cmap = 'gray');
plt.subplot(8,5,27); plt.imshow(nodTrans5[:,:,5], cmap = 'gray');
plt.subplot(8,5,32); plt.imshow(nodTrans5[:,:,6], cmap = 'gray');
plt.subplot(8,5,37); plt.imshow(nodTrans5[:,:,7], cmap = 'gray');
plt.subplot(8,5,3); plt.imshow(nodTrans6[:,:,0], cmap = 'gray');
plt.subplot(8,5,8); plt.imshow(nodTrans6[:,:,1], cmap = 'gray');
plt.subplot(8,5,13); plt.imshow(nodTrans6[:,:,2], cmap = 'gray');
plt.subplot(8,5,18); plt.imshow(nodTrans6[:,:,3], cmap = 'gray');
plt.subplot(8,5,23); plt.imshow(nodTrans6[:,:,4], cmap = 'gray');
plt.subplot(8,5,28); plt.imshow(nodTrans6[:,:,5], cmap = 'gray');
plt.subplot(8,5,33); plt.imshow(nodTrans6[:,:,6], cmap = 'gray');
plt.subplot(8,5,38); plt.imshow(nodTrans6[:,:,7], cmap = 'gray');
plt.subplot(8,5,4); plt.imshow(nodTrans7[:,:,0], cmap = 'gray');
plt.subplot(8,5,9); plt.imshow(nodTrans7[:,:,1], cmap = 'gray');
plt.subplot(8,5,14); plt.imshow(nodTrans7[:,:,2], cmap = 'gray');
plt.subplot(8,5,19); plt.imshow(nodTrans7[:,:,3], cmap = 'gray');
plt.subplot(8,5,24); plt.imshow(nodTrans7[:,:,4], cmap = 'gray');
plt.subplot(8,5,29); plt.imshow(nodTrans7[:,:,5], cmap = 'gray');
plt.subplot(8,5,34); plt.imshow(nodTrans7[:,:,6], cmap = 'gray');
plt.subplot(8,5,39); plt.imshow(nodTrans7[:,:,7], cmap = 'gray');
plt.subplot(8,5,5); plt.imshow(nodTrans8[:,:,0], cmap = 'gray');
plt.subplot(8,5,10); plt.imshow(nodTrans8[:,:,1], cmap = 'gray');
plt.subplot(8,5,15); plt.imshow(nodTrans8[:,:,2], cmap = 'gray');
plt.subplot(8,5,20); plt.imshow(nodTrans8[:,:,3], cmap = 'gray');
plt.subplot(8,5,25); plt.imshow(nodTrans8[:,:,4], cmap = 'gray');
plt.subplot(8,5,30); plt.imshow(nodTrans8[:,:,5], cmap = 'gray');
plt.subplot(8,5,35); plt.imshow(nodTrans8[:,:,6], cmap = 'gray');
plt.subplot(8,5,40); plt.imshow(nodTrans8[:,:,7], cmap = 'gray');
################################################################################
# This part is to display the regular augmentations (simple flip/rotations)
################################################################################
plt.figure()
nodSimple1 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_r11.npy')
nodSimple2 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_r21.npy')
nodSimple3 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_r31.npy')
nodSimple4 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_f11.npy')
nodSimple5 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_f12.npy')
nodSimple6 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_b11.npy')
nodSimple7 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_b12.npy')
nodSimple8 = np.load('/diskStation/LIDC/36368/pos_aug_0_36368/p1012_20000101_s32231_0_b21.npy')
plt.subplot(2,4,1); plt.imshow(nodSimple1[:,:,i], cmap = 'gray');
plt.subplot(2,4,2); plt.imshow(nodSimple2[:,:,i], cmap = 'gray');
plt.subplot(2,4,3); plt.imshow(nodSimple3[:,:,i], cmap = 'gray');
plt.subplot(2,4,4); plt.imshow(nodSimple4[:,:,i], cmap = 'gray');
plt.subplot(2,4,5); plt.imshow(nodSimple5[:,:,i], cmap = 'gray');
plt.subplot(2,4,6); plt.imshow(nodSimple6[:,:,i], cmap = 'gray');
plt.subplot(2,4,7); plt.imshow(nodSimple7[:,:,i], cmap = 'gray');
plt.subplot(2,4,8); plt.imshow(nodSimple8[:,:,i], cmap = 'gray'); | bsd-2-clause |
jpn--/larch | larch/util/activitysim/nonmand_tour_freq.py | 1 | 5435 | import numpy as np
import pandas as pd
import re
import os
import yaml
import itertools
from typing import Mapping
from larch import P, X, DataFrames, Model
from larch.util import Dict
from pathlib import Path
import logging
from ...log import logger_name
from .general import remove_apostrophes, linear_utility_from_spec, apply_coefficients, cv_to_ca
_logger = logging.getLogger(logger_name)
def interaction_simulate_data(
name="non_mandatory_tour_frequency",
edb_directory="output/estimation_data_bundle/{name}/",
settings_file="{name}_model_settings.yaml",
spec_file="{name}_SPEC.csv",
alt_def_file="{name}_alternatives.csv",
coefficients_files="{segment_name}/{name}_coefficients_{segment_name}.csv",
chooser_data_files="{segment_name}/{name}_choosers_combined.csv",
alt_values_files="{segment_name}/{name}_interaction_expression_values.csv",
):
edb_directory = edb_directory.format(name=name)
def _read_csv(filename, **kwargs):
filename = filename.format(name=name)
return pd.read_csv(os.path.join(edb_directory, filename), **kwargs)
settings_file = settings_file.format(name=name)
with open(os.path.join(edb_directory, settings_file), "r") as yf:
settings = yaml.load(
yf,
Loader=yaml.SafeLoader,
)
coefficients = {}
chooser_data = {}
alt_values = {}
segment_names = [s['NAME'] for s in settings['SPEC_SEGMENTS']]
for segment_name in segment_names:
coefficients[segment_name] = _read_csv(
coefficients_files.format(name=name, segment_name=segment_name),
index_col='coefficient_name',
)
chooser_data[segment_name] = _read_csv(
chooser_data_files.format(name=name, segment_name=segment_name),
)
alt_values[segment_name] = _read_csv(
alt_values_files.format(name=name, segment_name=segment_name),
)
spec = _read_csv(
spec_file,
)
spec = remove_apostrophes(spec, ['Label'])
# alt_names = list(spec.columns[3:])
# alt_codes = np.arange(1, len(alt_names) + 1)
# alt_names_to_codes = dict(zip(alt_names, alt_codes))
# alt_codes_to_names = dict(zip(alt_codes, alt_names))
alt_def = _read_csv(alt_def_file.format(name=name), index_col=0)
return Dict(
edb_directory=Path(edb_directory),
settings=settings,
chooser_data=chooser_data,
coefficients=coefficients,
alt_values=alt_values,
spec=spec,
alt_def=alt_def,
)
def link_same_value_coefficients(segment_names, coefficients, spec):
# Assume all coefficients with exactly equal current values are
# actually the same estimated coefficient value and should be
# treated as such by Larch. Comment out this function where called to relax
# this assumption, although be careful about the number of unique
# parameters to estimate in these models.
relabel_coef = {}
for segment_name in segment_names:
coef_backwards_map = dict([(j, i) for i, j in coefficients[segment_name]['value'].items()])
relabel_coef[segment_name] = r = coefficients[segment_name]['value'].map(coef_backwards_map)
spec[segment_name] = spec[segment_name].map(r)
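# Worked micro-example (hypothetical names and values): if a segment's
# coefficient file holds coef_escort_asc = -0.5 and coef_shopping_asc = -0.5,
# the backwards map sends the shared value -0.5 to a single coefficient name,
# so both spec entries end up referencing one parameter and Larch estimates
# it only once.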
def unavail_parameters(model):
return model.pf.index[(model.pf.value < -900) & (model.pf.holdfast != 0)]
def unavail_data_cols(model):
locks = unavail_parameters(model)
return [i.data for i in model.utility_ca if i.param in locks]
def unavail(model, x_ca):
lock_data = unavail_data_cols(model)
if len(lock_data):
unav = x_ca[lock_data[0]] > 0
for j in lock_data[1:]:
unav |= x_ca[j] > 0
return unav
def nonmand_tour_freq_model(
edb_directory="output/estimation_data_bundle/{name}/",
return_data=False,
):
data = interaction_simulate_data(
name="non_mandatory_tour_frequency",
edb_directory=edb_directory,
)
settings = data.settings
segment_names = [s['NAME'] for s in settings['SPEC_SEGMENTS']]
link_same_value_coefficients(segment_names, data.coefficients, data.spec)
spec = data.spec
coefficients = data.coefficients
chooser_data = data.chooser_data
alt_values = data.alt_values
alt_def = data.alt_def
m = {}
for segment_name in segment_names:
segment_model = m[segment_name] = Model()
# One of the alternatives is coded as 0, so
# we need to explicitly initialize the MNL nesting graph
# and set the root_id to a value other than zero.
segment_model.initialize_graph(alternative_codes=alt_def.index, root_id=9999)
### Utility specifications
segment_model.utility_ca = linear_utility_from_spec(
spec, x_col='Label', p_col=segment_name,
)
apply_coefficients(coefficients[segment_name], segment_model)
segment_model.choice_co_code = 'override_choice'
### Attach Data
x_co = chooser_data[segment_name].set_index('person_id').rename(columns={'TAZ': 'HOMETAZ'})
x_ca = cv_to_ca(
alt_values[segment_name].set_index(['person_id', 'variable'])
)
d = DataFrames(
co=x_co,
ca=x_ca,
av=~unavail(segment_model, x_ca),
)
m[segment_name].dataservice = d
if return_data:
return m, data
return m | gpl-3.0 |
ankurankan/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties.
All of the above are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501 # exactly 0.5 would cause division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
| bsd-3-clause |
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/mod/analyser/mod.py | 1 | 10204 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from collections import defaultdict
import six
from enum import Enum
import numpy as np
import pandas as pd
from rqalpha.interface import AbstractMod
from rqalpha.events import EVENT
from rqalpha.const import ACCOUNT_TYPE, EXIT_CODE
from rqalpha.utils.risk import Risk
from rqalpha.utils.repr import properties
from rqalpha.execution_context import ExecutionContext
class AnalyserMod(AbstractMod):
def __init__(self):
self._env = None
self._mod_config = None
self._enabled = False
self._result = None
self._orders = defaultdict(list)
self._trades = []
self._total_portfolios = []
self._sub_portfolios = defaultdict(list)
self._positions = defaultdict(list)
self._benchmark_daily_returns = []
self._portfolio_daily_returns = []
self._latest_portfolio = None
self._latest_benchmark_portfolio = None
def start_up(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._enabled = (self._mod_config.record or self._mod_config.plot or self._mod_config.output_file or
self._mod_config.plot_save_file or self._mod_config.report_save_path)
if self._enabled:
env.event_bus.add_listener(EVENT.POST_SETTLEMENT, self._collect_daily)
env.event_bus.add_listener(EVENT.TRADE, self._collect_trade)
env.event_bus.add_listener(EVENT.ORDER_CREATION_PASS, self._collect_order)
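# A typical mod configuration for this analyser might look like the sketch
# below (illustrative only; where these keys live depends on the strategy's
# RQAlpha configuration file):
# "analyser": {
#     "record": True,
#     "plot": False,
#     "output_file": "results/backtest.pkl",
#     "plot_save_file": None,
#     "report_save_path": None,
# }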
def _collect_trade(self, account, trade):
self._trades.append(self._to_trade_record(trade))
def _collect_order(self, account, order):
self._orders[order.trading_datetime.date()].append(order)
def _collect_daily(self):
date = self._env.calendar_dt.date()
portfolio = self._env.account.get_portfolio(date)
self._latest_portfolio = portfolio
self._portfolio_daily_returns.append(portfolio.daily_returns)
self._total_portfolios.append(self._to_portfolio_record(date, portfolio))
if ACCOUNT_TYPE.BENCHMARK in self._env.accounts:
self._latest_benchmark_portfolio = self._env.accounts[ACCOUNT_TYPE.BENCHMARK].portfolio
self._benchmark_daily_returns.append(self._latest_benchmark_portfolio.daily_returns)
else:
self._benchmark_daily_returns.append(0)
for account_type, account in six.iteritems(self._env.accounts):
portfolio = account.get_portfolio(date)
self._sub_portfolios[account_type].append(self._to_portfolio_record2(date, portfolio))
for order_book_id, position in six.iteritems(portfolio.positions):
self._positions[account_type].append(self._to_position_record(date, order_book_id, position))
def _symbol(self, order_book_id):
return self._env.data_proxy.instruments(order_book_id).symbol
@staticmethod
def _safe_convert(value, ndigits=3):
if isinstance(value, Enum):
return value.name
if isinstance(value, (float, np.float64, np.float32, np.float16, np.float)):
return round(value, ndigits)
return value
def _to_portfolio_record(self, date, portfolio):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(portfolio))
if not k.startswith('_') and not k.endswith('_') and k not in {
"positions", "start_date", "starting_cash"
}
}
data['date'] = date
return data
def _to_portfolio_record2(self, date, portfolio):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(portfolio.__dict__)
if not k.startswith('_') and not k.endswith('_') and k not in {
"positions", "start_date", "starting_cash"
}
}
data['date'] = date
return data
def _to_position_record(self, date, order_book_id, position):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(position.__dict__)
if not k.startswith('_') and not k.endswith('_')
}
data['order_book_id'] = order_book_id
data['symbol'] = self._symbol(order_book_id)
data['date'] = date
return data
def _to_trade_record(self, trade):
data = {
k: self._safe_convert(v) for k, v in six.iteritems(properties(trade))
if not k.startswith('_') and not k.endswith('_') and k != 'order'
}
data['order_book_id'] = trade.order.order_book_id
data['symbol'] = self._symbol(trade.order.order_book_id)
data['side'] = self._safe_convert(trade.order.side)
data['position_effect'] = self._safe_convert(trade.order.position_effect)
data['datetime'] = data['datetime'].strftime("%Y-%m-%d %H:%M:%S")
data['trading_datetime'] = data['trading_datetime'].strftime("%Y-%m-%d %H:%M:%S")
return data
def tear_down(self, code, exception=None):
if code != EXIT_CODE.EXIT_SUCCESS or not self._enabled:
return
strategy_name = os.path.basename(self._env.config.base.strategy_file).split(".")[0]
data_proxy = self._env.data_proxy
summary = {
'strategy_name': strategy_name,
}
for k, v in six.iteritems(self._env.config.base.__dict__):
if k in ["trading_calendar", "account_list", "timezone", "persist_mode",
"resume_mode", "data_bundle_path", "handle_split", "persist"]:
continue
summary[k] = self._safe_convert(v, 2)
risk = Risk(np.array(self._portfolio_daily_returns), np.array(self._benchmark_daily_returns),
data_proxy.get_risk_free_rate(self._env.config.base.start_date, self._env.config.base.end_date),
(self._env.config.base.end_date - self._env.config.base.start_date).days + 1)
summary.update({
'alpha': self._safe_convert(risk.alpha, 3),
'beta': self._safe_convert(risk.beta, 3),
'sharpe': self._safe_convert(risk.sharpe, 3),
'information_ratio': self._safe_convert(risk.information_ratio, 3),
'downside_risk': self._safe_convert(risk.annual_downside_risk, 3),
'tracking_error': self._safe_convert(risk.annual_tracking_error, 3),
'sortino': self._safe_convert(risk.sortino, 3),
'volatility': self._safe_convert(risk.annual_volatility, 3),
'max_drawdown': self._safe_convert(risk.max_drawdown, 3),
})
summary.update({
k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(self._latest_portfolio))
if k not in ["positions", "daily_returns", "daily_pnl"]
})
if self._latest_benchmark_portfolio:
summary['benchmark_total_returns'] = self._latest_benchmark_portfolio.total_returns
summary['benchmark_annualized_returns'] = self._latest_benchmark_portfolio.annualized_returns
trades = pd.DataFrame(self._trades)
if 'datetime' in trades.columns:
trades = trades.set_index('datetime')
df = pd.DataFrame(self._total_portfolios)
df['date'] = pd.to_datetime(df['date'])
total_portfolios = df.set_index('date').sort_index()
result_dict = {
'summary': summary,
'trades': trades,
'total_portfolios': total_portfolios,
}
if ExecutionContext.plots is not None:
plots = ExecutionContext.plots.get_plots()
plots_items = defaultdict(dict)
for series_name, value_dict in six.iteritems(plots):
for date, value in six.iteritems(value_dict):
plots_items[date][series_name] = value
plots_items[date]["date"] = date
df = pd.DataFrame([dict_data for date, dict_data in six.iteritems(plots_items)])
df["date"] = pd.to_datetime(df["date"])
df = df.set_index("date").sort_index()
result_dict["plots"] = df
for account_type, account in six.iteritems(self._env.accounts):
account_name = account_type.name.lower()
portfolios_list = self._sub_portfolios[account_type]
df = pd.DataFrame(portfolios_list)
df["date"] = pd.to_datetime(df["date"])
portfolios_df = df.set_index("date").sort_index()
result_dict["{}_portfolios".format(account_name)] = portfolios_df
positions_list = self._positions[account_type]
positions_df = pd.DataFrame(positions_list)
if "date" in positions_df.columns:
positions_df["date"] = pd.to_datetime(positions_df["date"])
positions_df = positions_df.set_index("date").sort_index()
result_dict["{}_positions".format(account_name)] = positions_df
self._result = result_dict
if self._mod_config.output_file:
with open(self._mod_config.output_file, 'wb') as f:
pickle.dump(result_dict, f)
if self._mod_config.plot:
from rqalpha.plot import plot_result
plot_result(result_dict)
if self._mod_config.plot_save_file:
from rqalpha.plot import plot_result
plot_result(result_dict, False, self._mod_config.plot_save_file)
if self._mod_config.report_save_path:
from rqalpha.utils.report import generate_report
generate_report(result_dict, self._mod_config.report_save_path)
| mit |
gammalib/gammalib | inst/cta/test/dev/test_gauss.py | 1 | 3354 | #! /usr/bin/env python
# ==========================================================================
# This script tests the Gaussian gradient.
#
# Requires:
# - matplotlib (optional)
#
# Copyright (C) 2012 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
from gammalib import *
from math import *
# ====================== #
# Test Gaussian gradient #
# ====================== #
def gauss(dir, sigma):
"""
Test Gaussian gradient.
"""
# Compute derivative
dh = 0.0001
s = GSkyDir()
e = GEnergy()
t = GTime()
f1 = GModelSpatialRadialGauss(s, sigma)
f2 = GModelSpatialRadialGauss(s, sigma + dh)
v1 = f1.eval(GPhoton(dir, e, t), True)
g1 = f1[2].gradient()
v2 = f2.eval(GPhoton(dir, e, t), True)
g2 = f2[2].gradient()
g = (v2 - v1) / dh
# Print result
#print v1, v2
#print g1, g2, g
# Return
return g
# ============= #
# Show Gaussian #
# ============= #
def show_gaussian(sigma):
"""
Show Gaussian using matplotlib (if available).
"""
# Only proceed if matplotlib is available
try:
# Import matplotlib
import matplotlib.pyplot as plt
# Create figure
plt.figure(1)
plt.title("Gaussian (sigma=" + str(sigma) + " deg)")
# Setup gaussian
skydir = GSkyDir()
gauss = GModelSpatialRadialGauss(skydir, sigma)
# Create angular axis
theta = [i * sigma * 0.05 for i in range(50)]
# Extract function
f_gauss = []
f_expected = []
sigma_rad = sigma * (pi / 180.0)
norm = 1.0 / (2.0 * pi * sigma_rad * sigma_rad)
eng = GEnergy()
time = GTime()
for t in theta:
s = GSkyDir()
s.radec_deg(0.0, t)
f = gauss.eval(GPhoton(s, eng, time))
e = norm * exp(-0.5 * t * t / sigma / sigma)
f_gauss.append(f)
f_expected.append(e)
# Plot data
plt.plot(theta, f_gauss, 'r-')
plt.plot(theta, f_expected, 'b.')
# Set axes
plt.xlabel("Separation (deg)")
plt.ylabel("Function value")
# Show plot
plt.show()
except ImportError:
print("Matplotlib is not (correctly) installed on your system.")
# Return
return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
"""
Test Gaussian gradient.
"""
# Dump header
print("")
print("**************************")
print("* Test Gaussian gradient *")
print("**************************")
# Show Gaussian
show_gaussian(3.0)
| gpl-3.0 |
hammerlab/pyensembl | setup.py | 1 | 2738 | # Copyright (c) 2014-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
from setuptools import setup
package_name = "pyensembl"
current_directory = os.path.dirname(__file__)
readme_filename = 'README.md'
readme_path = os.path.join(current_directory, readme_filename)
github_url = "https://github.com/openvax/%s" % package_name
try:
with open(readme_path, 'r') as f:
readme_markdown = f.read()
except IOError as e:
print(e)
print("Failed to open %s" % readme_path)
readme_markdown = ""
with open('%s/__init__.py' % package_name, 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(),
re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
if __name__ == '__main__':
setup(
name=package_name,
version=version,
description="Python interface to ensembl reference genome metadata",
author="Alex Rubinsteyn",
author_email="[email protected]",
url=github_url,
license="http://www.apache.org/licenses/LICENSE-2.0.html",
entry_points={
'console_scripts': [
'pyensembl = %s.shell:run' % package_name
],
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
install_requires=[
"typechecks>=0.0.2",
"numpy>=1.7",
"pandas>=0.15",
"datacache>=1.1.4",
"memoized-property>=1.0.2",
"six>=1.9.0",
"gtfparse>=1.1.0",
"serializable",
"tinytimer",
],
long_description=readme_markdown,
long_description_content_type='text/markdown',
packages=[package_name],
package_data={package_name: ['logging.conf']},
)
| apache-2.0 |
hrjn/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 159 | 2951 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
| bsd-3-clause |
barbagroup/conferences | GTC2016/plots/scripts/readCylinder2dRe40.py | 1 | 2840 | # coding: utf-8
'''
read performance results of 2D cylinder flow, Re40
'''
import os
from matplotlib import pyplot
from parsePetIBMperformance import *
def plotOneCylinderFig(
caseNames, caseNameDict, events, timing, plotOrder, figName, save):
'''
Plot the figures for cylinder flow tests.
'''
assert save in [True, False], \
"Use True or False to specify whether to " +\
"save the figure or show the figure on screen"
fig = pyplot.figure(figsize=(10, 8), dpi=150)
ax = fig.gca()
color_cycle = ax._get_lines.prop_cycler
bw = 0.5
bLoc = {caseName: i+0.75 for i, caseName in enumerate(caseNames)}
bOffSet = {caseName: 0 for i, caseName in enumerate(caseNames)}
xticks = [caseNameDict[name] for name in caseNames]
for i in plotOrder:
c = next(color_cycle)['color']
for caseName in caseNames:
ax.bar(bLoc[caseName], timing[caseName][events[i]], bw,
label=events[i], bottom=bOffSet[caseName],
color=c, lw=0, zorder=0)
bOffSet[caseName] += timing[caseName][events[i]]
ax.set_ylabel("Time (min)", fontsize=16)
ax.set_title("PetIBM + AmgX", fontsize=22)
ax.set_xlim(0.5, 3.5)
ax.set_xticks(range(1, 4))
ax.set_xticklabels(xticks, fontsize=14, fontweight="bold")
ax.yaxis.grid(zorder=10)
bars, labels = ax.get_legend_handles_labels()
pyplot.subplots_adjust(top=0.95, bottom=0.225, left=0.0775, right=0.98)
pyplot.figlegend(bars[0::len(caseNames)], labels[0::len(caseNames)], ncol=2,
loc=8, bbox_to_anchor=[0.525, 0.005], fontsize=15)
if save:
pyplot.savefig(figName)
else:
pyplot.show()
def plotCylinderFigs(folder, saveDir, save):
'''
Plot all figures for 2D cylinder flow (Re=40) for GTC presentation
'''
assert save in [True, False], \
"Use True or False to specify whether to " +\
"save the figure or show the figure on screen"
data, events = parseResultFiles(folder, "m")
caseNameDict = {
"pure6CPU": "6 CPU\nonly",
"6CPU1GPU_Consolidation": "6 CPU\n&1 GPU",
"6CPU1GPU_noConsolidation": "6 CPU\n&1 GPU",
"1CPU1GPU": "1CPU\n&1 GPU"}
cases = [["pure6CPU", "1CPU1GPU"],
["pure6CPU", "1CPU1GPU", "6CPU1GPU_noConsolidation"],
["pure6CPU", "1CPU1GPU", "6CPU1GPU_Consolidation"]]
plotOrder = [4, 2, 3, 1, 5, 0]
figNames = [
saveDir + "/rawAmgXMultiRanks_Cylinder2d_1.png",
saveDir + "/rawAmgXMultiRanks_Cylinder2d_2.png",
saveDir + "/newAmgXMultiRanks_Cylinder2d.png"]
for i in range(len(cases)):
plotOneCylinderFig(
cases[i], caseNameDict, events, data, plotOrder, figNames[i], save)
| mit |
MaxStrange/ArtieInfant | scripts/ad-hoc/vowel_embedding.py | 1 | 6030 | """
Loads an autoencoder model, plots the latent space for the test set, and then
plots sounds from a directory on top of that space, with arrows pointing to
each of the overlaid sounds. The arrows have labels that are the file names
of the sounds (without the extension).
"""
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Load the stuff we need from ArtieInfant proper
sys.path.append(os.path.abspath("../../"))
sys.path.append(os.path.abspath("../../Artie"))
from experiment.thesis import phase1 # pylint: disable=locally-disabled, import-error
from experiment.analysis.vae import plotvae # pylint: disable=locally-disabled, import-error
def _plot_projections(test_set_embeddings: np.ndarray, special_embeddings: np.ndarray, labels: [str]) -> None:
"""
Projects the 3D embeddings onto the three planes (X, Y), (X, Z), and (Y, Z).
Asserts that the embeddings are 3-dimensional.
"""
if test_set_embeddings.shape[0] == 0:
print("No test_set_embeddings. Can't project.")
return
assert test_set_embeddings.shape[1] == 3, "This only works for 3D embeddings."
fig = plt.figure()
ax = fig.add_subplot(131)
ax.set_xlabel('(X, Y)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
ax = fig.add_subplot(132)
ax.set_xlabel('(X, Z)')
ax.scatter(test_set_embeddings[:, 0], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 2], c='red')
ax = fig.add_subplot(133)
ax.set_xlabel('(Y, Z)')
ax.scatter(test_set_embeddings[:, 1], test_set_embeddings[:, 2])
ax.scatter(special_embeddings[:, 1], special_embeddings[:, 2], c='red')
fig.suptitle("Projection of 3D Embeddings")
save = "scatter_embeddings_ad_hoc_projections.png"
plt.savefig(save)
plt.show()
plt.clf()
def _plot(test_embeddings: np.ndarray, special_embeddings: np.ndarray, special_labels: [str], ndims: int) -> None:
"""
Plots the given embeddings and labels.
"""
fig = plt.figure()
if ndims == 1:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.scatter(test_embeddings, np.zeros_like(test_embeddings))
ax.scatter(special_embeddings, np.zeros_like(special_embeddings), c='red')
elif ndims == 2:
ax = fig.add_subplot(111)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], c='red')
elif ndims == 3:
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.scatter(test_embeddings[:, 0], test_embeddings[:, 1], test_embeddings[:, 2])
ax.scatter(special_embeddings[:, 0], special_embeddings[:, 1], special_embeddings[:, 2], c='red')
else:
raise ValueError("`ndims` must be 1, 2, or 3, but is {}".format(ndims))
ax.set_title("Scatter Plot of Embeddings")
save = "scatter_embeddings_ad_hoc.png"
print("Saving", save)
plt.savefig(save)
plt.show()
plt.clf()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('aemodelpath', type=str, help="Path to the Auto Encoder weights.")
parser.add_argument('specmode', choices=['long', 'short'], help="Long: 241x20x1 spectrograms; Short: 81x18x1")
parser.add_argument('overlaydir', type=str, help="Directory that contains the sound files you want to overlay on the test set's embeddings")
parser.add_argument('--ndims', default=3, type=int, help="The number of dimensions of the latent space for the given model of autoencoder.")
parser.add_argument('--projection', action='store_true', help="If present, we will project the plot onto the three planes (X, Y), (X, Z), and (Y, Z). Only works if ndims is 3, ignored otherwise.")
args = parser.parse_args()
# Validate args
if not os.path.isfile(args.aemodelpath):
print("Not a file: {}".format(args.aemodelpath))
exit(1)
elif not os.path.isdir(args.overlaydir):
print("Not a directory: {}".format(args.overlaydir))
exit(2)
# Set stuff up based on what mode we are
if args.specmode == 'long':
input_shape = [241, 20, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/test_spectrogram_images/test_set"
sample_rate_hz = 16000.0
duration_s = 0.5
window_length_s = 0.03
ae = phase1._build_vae1(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
else:
input_shape = [81, 18, 1]
testdir = "/home/max/Dropbox/thesis/harddrive_backup/filterbank_images/test_set"
sample_rate_hz = 8000.0
duration_s = 0.3
window_length_s = 0.02
ae = phase1._build_vae2(is_variational=False, input_shape=input_shape, latent_dim=args.ndims, optimizer='adadelta', loss='mse', tbdir=None, kl_loss_prop=None, recon_loss_prop=None, std_loss_prop=None)
# Load the weights into the autoencoder
ae.load_weights(args.aemodelpath)
# Encode the test set
_, _, test_set_embeddings = plotvae._predict_on_spectrograms(testdir, ae, batchsize=32, nworkers=4, imshapes=input_shape)
# Encode the audio files found in the directory
_, _, special_embeddings, labels = plotvae._predict_on_sound_files(fpaths=None, dpath=args.overlaydir, model=ae, sample_rate_hz=sample_rate_hz, duration_s=duration_s, window_length_s=window_length_s)
# Now plot the embedding space
_plot(test_set_embeddings, special_embeddings, labels, args.ndims)
if args.ndims == 3 and args.projection:
# We want to project the 3D plot onto the three planes
_plot_projections(test_set_embeddings, special_embeddings, labels)
| mit |
ahoyosid/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
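A minimal illustration of the common usage pattern (assuming ``X`` is a
2-D numpy array of samples)::
    from sklearn.decomposition import PCA
    X_reduced = PCA(n_components=2).fit_transform(X)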
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
ahoyosid/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
Reagankm/KnockKnock | venv/lib/python3.4/site-packages/matplotlib/tri/triplot.py | 21 | 3124 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
def triplot(ax, *args, **kwargs):
"""
Draw an unstructured triangular grid as lines and/or markers.
The triangulation to plot can be specified in one of two ways;
either::
triplot(triangulation, ...)
where triangulation is a :class:`matplotlib.tri.Triangulation`
object, or
::
triplot(x, y, ...)
triplot(x, y, triangles, ...)
triplot(x, y, triangles=triangles, ...)
triplot(x, y, mask=mask, ...)
triplot(x, y, triangles, mask=mask, ...)
in which case a Triangulation object will be created. See
    :class:`~matplotlib.tri.Triangulation` for an explanation of these
possibilities.
The remaining args and kwargs are the same as for
:meth:`~matplotlib.axes.Axes.plot`.
Return a list of 2 :class:`~matplotlib.lines.Line2D` containing
respectively:
- the lines plotted for triangles edges
- the markers plotted for triangles nodes
**Example:**
.. plot:: mpl_examples/pylab_examples/triplot_demo.py
"""
import matplotlib.axes
tri, args, kwargs = Triangulation.get_from_args_and_kwargs(*args, **kwargs)
x, y, edges = (tri.x, tri.y, tri.edges)
# Decode plot format string, e.g., 'ro-'
fmt = ""
if len(args) > 0:
fmt = args[0]
linestyle, marker, color = matplotlib.axes._base._process_plot_format(fmt)
# Insert plot format string into a copy of kwargs (kwargs values prevail).
kw = kwargs.copy()
for key, val in zip(('linestyle', 'marker', 'color'),
(linestyle, marker, color)):
if val is not None:
kw[key] = kwargs.get(key, val)
# Draw lines without markers.
# Note 1: If we drew markers here, most markers would be drawn more than
# once as they belong to several edges.
# Note 2: We insert nan values in the flattened edges arrays rather than
# plotting directly (triang.x[edges].T, triang.y[edges].T)
# as it considerably speeds-up code execution.
linestyle = kw['linestyle']
kw_lines = kw.copy()
kw_lines['marker'] = 'None' # No marker to draw.
kw_lines['zorder'] = kw.get('zorder', 1) # Path default zorder is used.
if (linestyle is not None) and (linestyle not in ['None', '', ' ']):
tri_lines_x = np.insert(x[edges], 2, np.nan, axis=1)
tri_lines_y = np.insert(y[edges], 2, np.nan, axis=1)
tri_lines = ax.plot(tri_lines_x.ravel(), tri_lines_y.ravel(),
**kw_lines)
else:
tri_lines = ax.plot([], [], **kw_lines)
# Draw markers separately.
marker = kw['marker']
kw_markers = kw.copy()
kw_markers['linestyle'] = 'None' # No line to draw.
if (marker is not None) and (marker not in ['None', '', ' ']):
tri_markers = ax.plot(x, y, **kw_markers)
else:
tri_markers = ax.plot([], [], **kw_markers)
return tri_lines + tri_markers
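# Minimal usage sketch (illustrative only; for a complete example see
# mpl_examples/pylab_examples/triplot_demo.py). When only points are given,
# a Delaunay triangulation is built automatically:
#
#     >>> import numpy as np
#     >>> import matplotlib.pyplot as plt
#     >>> x, y = np.random.rand(2, 20)
#     >>> fig, ax = plt.subplots()
#     >>> lines = triplot(ax, x, y, 'go-')   # or equivalently ax.triplot(x, y, 'go-')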
| gpl-2.0 |
GianlucaBortoli/krafters | generate_plots.py | 1 | 3214 | #!/usr/bin/python3.4
import argparse
import matplotlib.pyplot as plt
import csv
import sys
import seaborn as sns
PLOT_TYPES = ["distribution", "mass", "raw"]
# Format of the chart:
# x axis -> append number
# y axis -> time for that particular append
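# Each CSV row is assumed to be a series label followed by the per-operation
# times, e.g. (made-up values):  node1,0.0021,0.0013,0.0040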
def generateRawPlot(test):
# set figure size
plt.figure(figsize=(15, 6))
handles = []
# draw plot
for raw in test:
label = raw.pop(0)
xAxis = range(len(raw))
yAxis = [float(i) for i in raw]
handle, = plt.plot(xAxis, yAxis, label=label)
handles.append(handle)
# put axis labels
plt.xlabel("operations")
plt.ylabel("time (s)")
plt.legend(handles=handles)
def generateDistributionPlot(test):
sns.set(color_codes=True)
for row in test:
label = row.pop(0)
d = [float(i) for i in row]
# Plot a filled kernel density estimate
sns.distplot(d, hist=False, kde_kws={"shade": True}, label=label)
plt.xlim([-0.01, 0.1])
plt.xlabel("time (s)")
plt.ylabel("operations")
def generateMassPlot(test):
# set figure size
plt.figure(figsize=(15, 6))
handles = []
# draw plot
for raw in test:
label = raw.pop(0)
yAxis = [i / (len(raw)) for i in range(len(raw) + 1)]
values = sorted([float(i) for i in raw])
xAxis = [0] + values
handle, = plt.plot(xAxis, yAxis, label=label)
handles.append(handle)
# put axis labels
plt.xlabel("time (s)")
plt.ylabel("probability of completion")
plt.legend(handles=handles)
def main():
argument_parser = argparse.ArgumentParser(description="Generates graph from test csv")
argument_parser.add_argument("result_csv", type=str, nargs="*",
help="path to test.csv file")
argument_parser.add_argument("-t", "--type", type=str, choices=PLOT_TYPES, dest="type", default="raw",
help="type of graph to print")
argument_parser.add_argument("-o", "--output", type=str, dest="output_file_path",
help="path of the output graph file")
argument_parser.add_argument("-s", help="shows the results on a window", action="store_true")
args = argument_parser.parse_args()
data_series = []
try:
for result_csv in args.result_csv:
with open(result_csv, "r") as f:
rdr = csv.reader(f, delimiter=',', quotechar='|')
for row in rdr:
data_series.append(row)
    except FileNotFoundError:
        print("File '{}' not found".format(result_csv))
        exit(1)
if not args.output_file_path:
if len(args.result_csv) > 1:
            print("You must specify an output file name if you are printing a multiple file graph!")
exit(1)
output_file = args.result_csv[0]+".png"
else:
output_file = args.output_file_path
if args.type == "raw":
generateRawPlot(data_series)
elif args.type == "distribution":
generateDistributionPlot(data_series)
else:
generateMassPlot(data_series)
plt.savefig(output_file)
if args.s:
plt.show()
if __name__ == '__main__':
main()
| mit |
alan-unravel/bokeh | examples/plotting/server/burtin.py | 42 | 4826 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
from collections import OrderedDict
from math import log, sqrt
import numpy as np
import pandas as pd
from six.moves import cStringIO as StringIO
from bokeh.plotting import figure, show, output_server
antibiotics = """
bacteria, penicillin, streptomycin, neomycin, gram
Mycobacterium tuberculosis, 800, 5, 2, negative
Salmonella schottmuelleri, 10, 0.8, 0.09, negative
Proteus vulgaris, 3, 0.1, 0.1, negative
Klebsiella pneumoniae, 850, 1.2, 1, negative
Brucella abortus, 1, 2, 0.02, negative
Pseudomonas aeruginosa, 850, 2, 0.4, negative
Escherichia coli, 100, 0.4, 0.1, negative
Salmonella (Eberthella) typhosa, 1, 0.4, 0.008, negative
Aerobacter aerogenes, 870, 1, 1.6, negative
Brucella antracis, 0.001, 0.01, 0.007, positive
Streptococcus fecalis, 1, 1, 0.1, positive
Staphylococcus aureus, 0.03, 0.03, 0.001, positive
Staphylococcus albus, 0.007, 0.1, 0.001, positive
Streptococcus hemolyticus, 0.001, 14, 10, positive
Streptococcus viridans, 0.005, 10, 40, positive
Diplococcus pneumoniae, 0.005, 11, 10, positive
"""
drug_color = OrderedDict([
("Penicillin", "#0d3362"),
("Streptomycin", "#c64737"),
("Neomycin", "black" ),
])
gram_color = {
"positive" : "#aeaeb8",
"negative" : "#e69584",
}
df = pd.read_csv(StringIO(antibiotics),
skiprows=1,
skipinitialspace=True,
engine='python')
width = 800
height = 800
inner_radius = 90
outer_radius = 300 - 10
minr = sqrt(log(.001 * 1E4))
maxr = sqrt(log(1000 * 1E4))
a = (outer_radius - inner_radius) / (minr - maxr)
b = inner_radius - a * maxr
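# rad() below maps MIC (minimum inhibitory concentration) values onto the radial
# axis on a sqrt(log) scale: a and b are chosen so that MIC = 0.001 lands on
# outer_radius and MIC = 1000 on inner_radius.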
def rad(mic):
return a * np.sqrt(np.log(mic * 1E4)) + b
big_angle = 2.0 * np.pi / (len(df) + 1)
small_angle = big_angle / 7
x = np.zeros(len(df))
y = np.zeros(len(df))
output_server("burtin")
p = figure(plot_width=width, plot_height=height, title="",
x_axis_type=None, y_axis_type=None,
x_range=[-420, 420], y_range=[-420, 420],
min_border=0, outline_line_color="black",
background_fill="#f0e1d2", border_fill="#f0e1d2")
p.line(x+1, y+1, alpha=0)
# annular wedges
angles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle
colors = [gram_color[gram] for gram in df.gram]
p.annular_wedge(
x, y, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,
)
# small wedges
p.annular_wedge(x, y, inner_radius, rad(df.penicillin),
-big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,
color=drug_color['Penicillin'])
p.annular_wedge(x, y, inner_radius, rad(df.streptomycin),
-big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,
color=drug_color['Streptomycin'])
p.annular_wedge(x, y, inner_radius, rad(df.neomycin),
-big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,
color=drug_color['Neomycin'])
# circular axes and lables
labels = np.power(10.0, np.arange(-3, 4))
radii = a * np.sqrt(np.log(labels * 1E4)) + b
p.circle(x, y, radius=radii, fill_color=None, line_color="white")
p.text(x[:-1], radii[:-1], [str(r) for r in labels[:-1]],
text_font_size="8pt", text_align="center", text_baseline="middle")
# radial axes
p.annular_wedge(x, y, inner_radius-10, outer_radius+10,
-big_angle+angles, -big_angle+angles, color="black")
# bacteria labels
xr = radii[0]*np.cos(np.array(-big_angle/2 + angles))
yr = radii[0]*np.sin(np.array(-big_angle/2 + angles))
label_angle=np.array(-big_angle/2+angles)
label_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side
p.text(xr, yr, df.bacteria, angle=label_angle,
text_font_size="9pt", text_align="center", text_baseline="middle")
# OK, these hand drawn legends are pretty clunky, will be improved in future release
p.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)
p.text([-30, -30], [-370, -390], text=["Gram-" + gr for gr in gram_color.keys()],
text_font_size="7pt", text_align="left", text_baseline="middle")
p.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,
color=list(drug_color.values()))
p.text([-15, -15, -15], [18, 0, -18], text=list(drug_color.keys()),
text_font_size="9pt", text_align="left", text_baseline="middle")
p.xgrid.grid_line_color = None
p.ygrid.grid_line_color = None
show(p)
| bsd-3-clause |
e-mission/e-mission-server | emission/analysis/plotting/geojson/geojson_feature_converter.py | 2 | 15279 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import map
from builtins import str
from builtins import *
import logging
import geojson as gj
import copy
import attrdict as ad
import pandas as pd
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.timequery as estt
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.decorations.section_queries as esds
import emission.storage.decorations.timeline as esdtl
import emission.core.wrapper.location as ecwl
import emission.core.wrapper.cleanedsection as ecwcs
import emission.core.wrapper.entry as ecwe
import emission.core.common as ecc
# TODO: Move this to the section_features class instead
import emission.analysis.intake.cleaning.location_smoothing as eaicl
import emission.analysis.config as eac
def _del_non_derializable(prop_dict, extra_keys):
for key in extra_keys:
if key in prop_dict:
del prop_dict[key]
def _stringify_foreign_key(prop_dict, key_names):
for key_name in key_names:
if hasattr(prop_dict, key_name):
setattr(prop_dict, key_name, str(getattr(prop_dict,key_name)))
def location_to_geojson(location):
"""
Converts a location wrapper object into geojson format.
This is pretty easy - it is a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param location: the location object
:return: a geojson version of the location. the object is of type "Feature".
"""
try:
ret_feature = gj.Feature()
ret_feature.id = str(location.get_id())
ret_feature.geometry = location.data.loc
ret_feature.properties = copy.copy(location.data)
ret_feature.properties["feature_type"] = "location"
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
except Exception as e:
logging.exception(("Error while converting object %s" % location))
raise e
def place_to_geojson(place):
"""
Converts a place wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param place: the place object
:return: a geojson version of the place. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(place.get_id())
ret_feature.geometry = place.data.location
ret_feature.properties = copy.copy(place.data)
ret_feature.properties["feature_type"] = "place"
# _stringify_foreign_key(ret_feature.properties, ["ending_trip", "starting_trip"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def stop_to_geojson(stop):
"""
Converts a stop wrapper object into geojson format.
This is also pretty easy - it is just a point.
Since we have other properties that we care about, we make it a feature.
Then, all the other stuff goes directly into the properties since the wrapper is a dict too!
:param stop: the stop object
:return: a geojson version of the stop. the object is of type "Feature".
"""
ret_feature = gj.Feature()
ret_feature.id = str(stop.get_id())
ret_feature.geometry = gj.LineString()
ret_feature.geometry.coordinates = [stop.data.enter_loc.coordinates, stop.data.exit_loc.coordinates]
ret_feature.properties = copy.copy(stop.data)
ret_feature.properties["feature_type"] = "stop"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["location"])
return ret_feature
def section_to_geojson(section, tl):
"""
This is the trickiest part of the visualization.
The section is basically a collection of points with a line through them.
    So the representation is a feature collection containing a single line feature through the points, with the section data attached as its properties.
:param section: the section to be converted
:return: a feature collection which is the geojson version of the section
"""
ts = esta.TimeSeries.get_time_series(section.user_id)
entry_it = ts.find_entries(["analysis/recreated_location"],
esda.get_time_query_for_trip_like(
"analysis/cleaned_section",
section.get_id()))
    # TODO: Decide whether we want to rewrite this to use dataframes throughout instead of python arrays.
# dataframes insert nans. We could use fillna to fill with default values, but if we are not actually
# using dataframe features here, it is unclear how much that would help.
feature_array = []
section_location_entries = [ecwe.Entry(entry) for entry in entry_it]
if len(section_location_entries) != 0:
logging.debug("first element in section_location_array = %s" % section_location_entries[0])
if not ecc.compare_rounded_arrays(section.data.end_loc.coordinates,
section_location_entries[-1].data.loc.coordinates,
digits=4):
logging.info("section_location_array[-1].data.loc %s != section.data.end_loc %s even after df.ts fix, filling gap" % \
(section_location_entries[-1].data.loc, section.data.end_loc))
if eac.get_config()["output.conversion.validityAssertions"]:
assert(False)
last_loc_doc = ts.get_entry_at_ts("background/filtered_location", "data.ts", section.data.end_ts)
if last_loc_doc is None:
logging.warning("can't find entry to patch gap, leaving gap")
else:
last_loc_entry = ecwe.Entry(last_loc_doc)
logging.debug("Adding new entry %s to fill the end point gap between %s and %s"
% (last_loc_entry.data.loc, section_location_entries[-1].data.loc,
section.data.end_loc))
section_location_entries.append(last_loc_entry)
points_line_feature = point_array_to_line(section_location_entries)
points_line_feature.id = str(section.get_id())
points_line_feature.properties.update(copy.copy(section.data))
# Update works on dicts, convert back to a section object to make the modes
# work properly
points_line_feature.properties = ecwcs.Cleanedsection(points_line_feature.properties)
points_line_feature.properties["feature_type"] = "section"
if eac.get_section_key_for_analysis_results() == esda.INFERRED_SECTION_KEY:
ise = esds.cleaned2inferred_section(section.user_id, section.get_id())
if ise is not None:
logging.debug("mapped cleaned section %s -> inferred section %s" %
(section.get_id(), ise.get_id()))
logging.debug("changing mode from %s -> %s" %
(points_line_feature.properties.sensed_mode, ise.data.sensed_mode))
points_line_feature.properties["sensed_mode"] = str(ise.data.sensed_mode)
else:
points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
else:
points_line_feature.properties["sensed_mode"] = str(points_line_feature.properties.sensed_mode)
_del_non_derializable(points_line_feature.properties, ["start_loc", "end_loc"])
# feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(points_line_feature)
return gj.FeatureCollection(feature_array)
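# Note: the FeatureCollection returned above contains a single LineString feature;
# its "properties" carry the cleaned-section fields plus parallel "times" and
# "timestamps" arrays with one entry per recreated location point.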
def incident_to_geojson(incident):
ret_feature = gj.Feature()
ret_feature.id = str(incident.get_id())
ret_feature.geometry = gj.Point()
ret_feature.geometry.coordinates = incident.data.loc.coordinates
ret_feature.properties = copy.copy(incident.data)
ret_feature.properties["feature_type"] = "incident"
# _stringify_foreign_key(ret_feature.properties, ["ending_section", "starting_section", "trip_id"])
_del_non_derializable(ret_feature.properties, ["loc"])
return ret_feature
def geojson_incidents_in_range(user_id, start_ts, end_ts):
MANUAL_INCIDENT_KEY = "manual/incident"
ts = esta.TimeSeries.get_time_series(user_id)
uc = enua.UserCache.getUserCache(user_id)
tq = estt.TimeQuery("data.ts", start_ts, end_ts)
incident_entry_docs = list(ts.find_entries([MANUAL_INCIDENT_KEY], time_query=tq)) \
+ list(uc.getMessage([MANUAL_INCIDENT_KEY], tq))
incidents = [ecwe.Entry(doc) for doc in incident_entry_docs]
return list(map(incident_to_geojson, incidents))
def point_array_to_line(point_array):
points_line_string = gj.LineString()
# points_line_string.coordinates = [l.loc.coordinates for l in filtered_section_location_array]
points_line_string.coordinates = []
points_times = []
points_timestamps = []
for l in point_array:
# logging.debug("About to add %s to line_string " % l)
points_line_string.coordinates.append(l.data.loc.coordinates)
points_times.append(l.data.ts)
points_timestamps.append(int(round(l.data.ts * 1000)))
points_line_feature = gj.Feature()
points_line_feature.geometry = points_line_string
points_line_feature.properties = {}
points_line_feature.properties["times"] = points_times
points_line_feature.properties["timestamps"] = points_timestamps
return points_line_feature
def trip_to_geojson(trip, tl):
"""
Trips are the main focus of our current visualization, so they are most complex.
Each trip is represented as a feature collection with the following features:
- two features for the start and end places
- features for each stop in the trip
- features for each section in the trip
:param trip: the trip object to be converted
:param tl: the timeline used to retrieve related objects
:return: the geojson version of the trip
"""
feature_array = []
curr_start_place = tl.get_object(trip.data.start_place)
curr_end_place = tl.get_object(trip.data.end_place)
start_place_geojson = place_to_geojson(curr_start_place)
start_place_geojson["properties"]["feature_type"] = "start_place"
feature_array.append(start_place_geojson)
end_place_geojson = place_to_geojson(curr_end_place)
end_place_geojson["properties"]["feature_type"] = "end_place"
feature_array.append(end_place_geojson)
trip_tl = esdt.get_cleaned_timeline_for_trip(trip.user_id, trip.get_id())
stops = trip_tl.places
for stop in stops:
feature_array.append(stop_to_geojson(stop))
for i, section in enumerate(trip_tl.trips):
section_gj = section_to_geojson(section, tl)
feature_array.append(section_gj)
trip_geojson = gj.FeatureCollection(features=feature_array, properties=trip.data)
trip_geojson.id = str(trip.get_id())
feature_array.extend(geojson_incidents_in_range(trip.user_id,
curr_start_place.data.exit_ts,
curr_end_place.data.enter_ts))
if trip.metadata.key == esda.CLEANED_UNTRACKED_KEY:
# trip_geojson.properties["feature_type"] = "untracked"
# Since the "untracked" type is not correctly handled on the phone, we just
# skip these trips until
# https://github.com/e-mission/e-mission-phone/issues/118
# is fixed
# TODO: Once it is fixed, re-introduce the first line in this block
# and remove the None check in get_geojson_for_timeline
return None
else:
trip_geojson.properties["feature_type"] = "trip"
return trip_geojson
def get_geojson_for_ts(user_id, start_ts, end_ts):
tl = esdtl.get_cleaned_timeline(user_id, start_ts, end_ts)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_dt(user_id, start_local_dt, end_local_dt):
logging.debug("Getting geojson for %s -> %s" % (start_local_dt, end_local_dt))
tl = esdtl.get_cleaned_timeline_from_dt(user_id, start_local_dt, end_local_dt)
tl.fill_start_end_places()
return get_geojson_for_timeline(user_id, tl)
def get_geojson_for_timeline(user_id, tl):
"""
tl represents the "timeline" object that is queried for the trips and locations
"""
geojson_list = []
for trip in tl.trips:
try:
trip_geojson = trip_to_geojson(trip, tl)
if trip_geojson is not None:
geojson_list.append(trip_geojson)
except Exception as e:
logging.exception("Found error %s while processing trip %s" % (e, trip))
raise e
logging.debug("trip count = %d, geojson count = %d" %
(len(tl.trips), len(geojson_list)))
return geojson_list
def get_all_points_for_range(user_id, key, start_ts, end_ts):
import emission.storage.timeseries.timequery as estt
# import emission.core.wrapper.location as ecwl
tq = estt.TimeQuery("metadata.write_ts", start_ts, end_ts)
ts = esta.TimeSeries.get_time_series(user_id)
entry_it = ts.find_entries([key], tq)
points_array = [ecwe.Entry(entry) for entry in entry_it]
return get_feature_list_for_point_array(points_array)
def get_feature_list_for_point_array(points_array):
points_feature_array = [location_to_geojson(le) for le in points_array]
print ("Found %d features from %d points" %
(len(points_feature_array), len(points_array)))
feature_array = []
feature_array.append(gj.FeatureCollection(points_feature_array))
feature_array.append(point_array_to_line(points_array))
feature_coll = gj.FeatureCollection(feature_array)
return feature_coll
def get_feature_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
"""
Input DF should have columns called "ts", "latitude" and "longitude", or the corresponding
columns can be passed in using the ts, latitude and longitude parameters
"""
points_array = get_location_entry_list_from_df(loc_time_df, ts, latitude, longitude, fmt_time)
return get_feature_list_for_point_array(points_array)
def get_location_entry_list_from_df(loc_time_df, ts="ts", latitude="latitude", longitude="longitude", fmt_time="fmt_time"):
location_entry_list = []
for idx, row in loc_time_df.iterrows():
retVal = {"latitude": row[latitude], "longitude": row[longitude], "ts": row[ts],
"_id": str(idx), "fmt_time": row[fmt_time], "loc": gj.Point(coordinates=[row[longitude], row[latitude]])}
location_entry_list.append(ecwe.Entry.create_entry(
"dummy_user", "background/location", ecwl.Location(retVal)))
return location_entry_list
| bsd-3-clause |
noahbenson/pimms | pimms/table.py | 1 | 19575 | ####################################################################################################
# pimms/table.py
# Classes for storing immutable data tables.
# By Noah C. Benson
import copy, types, sys, pint, six
import numpy as np
import pyrsistent as ps
from functools import reduce
from .util import (merge, is_pmap, is_map, LazyPMap, lazy_map, is_lazy_map,
is_quantity, is_unit, is_str, is_int, is_vector,
quant, iquant, mag, unit, qhash, units, imm_array,
getargspec_py27like)
from .immutable import (immutable, value, param, require, option)
if sys.version_info[0] == 3: from collections import abc as colls
else: import collections as colls
def _ndarray_assoc(arr, k, v):
'_ndarray_assoc(arr, k, v) duplicates arr to a writeable array, sets arr2[k]=v, returns arr2'
arr = np.array(arr)
arr[k] = v
arr.setflags(write=False)
return arr
class ITableRow(colls.Mapping):
'''
ITableRow is a class that works with the ITable class to quickly and lazily allow access to
individual rows as if they were individual persistent maps. For all intents and purposes, an
ITableRow object should be treated as a dict object that cannot be changed.
Note that ITableRow is not an immutable class, but its members cannot be changed. The class
is intended as a hidden subclass that is very efficient.
'''
def __init__(self, data, colnames, rownum):
object.__setattr__(self, 'data', data)
object.__setattr__(self, 'column_names', colnames)
object.__setattr__(self, 'row_number', rownum)
def keys(self):
return self.column_names
def __setattr__(self, k, v):
raise RuntimeError('ITableRow object is immutable')
def __getitem__(self, key):
return self.data[key][self.row_number]
def __setitem__(self, key, value):
raise RuntimeError('Cannot set row of immutable table')
def __delitem__(self, key):
raise RuntimeError('Cannot set row of immutable table')
def __iter__(self):
dat = self.data
n = self.row_number
for col in self.column_names:
yield col
def __len__(self):
return len(self.column_names)
def asdict(self):
return {k:self.data[k][self.row_number] for k in self.__iter__()}
def aspmap(self):
return ps.pmap(self.asdict())
def __repr__(self):
return repr(self.asdict())
def __hash__(self):
return hash(self.aspmap())
@immutable
class ITable(colls.Mapping):
'''
The ITable class is a simple immutable datatable.
'''
def __init__(self, data, n=None):
self.data = data
self._row_count = n
def __hash__(self):
# we want to make sure arrays are immutable
return qhash(self.data)
def __getstate__(self):
d = self.__dict__.copy()
d['data'] = {k:(mag(v), unit(v)) if is_quantity(v) else (v, None)
for (k,v) in six.iteritems(self.data)}
return d
def __setstate__(self, d):
dat = d['data']
object.__setattr__(self, 'data',
ps.pmap({k:(imm_array(u) if v is None else iquant(u, v))
for (k,(u,v)) in six.iteritems(dat)}))
object.__setattr__(self, '_row_count', None)
@staticmethod
def _filter_col(vec):
'_filter_col(vec) yields a read-only numpy array version of the given column vector'
if isinstance(vec, types.FunctionType) and getargspec_py27like(vec)[0] == []:
return lambda:ITable._filter_col(vec())
elif is_quantity(vec):
m = mag(vec)
mm = ITable._filter_col(m)
return vec if m is mm else quant(mm, unit(vec))
else:
return imm_array(vec)
@param
def data(d):
'''
itbl.data is an immutable map of the given itable in which property names are associated
with their data vectors.
'''
# we want to check these values and clean them up as we go, but if this is a lazy map, we
# want to do that lazily...
if is_map(d):
if not is_lazy_map(d): d = lazy_map(d)
def _make_lambda(k): return (lambda:ITable._filter_col(d[k]))
return lazy_map(
{k:_make_lambda(k) if d.is_lazy(k) else ITable._filter_col(d[k])
for k in six.iterkeys(d)})
else:
raise ValueError('Unable to interpret data argument; must be a mapping')
@param
def _row_count(n):
'''
itbl._row_count is the row count, as provided by internal methods when the row count can be
        known ahead of time. It should not generally be used; use itbl.row_count instead.
'''
return n
@require
def validate_data(data):
'''
ITable data is required to be a PMap with keys that are strings.
'''
if not isinstance(data, ps.PMap):
raise ValueError('data is required to be a persistent map')
if not all(isinstance(k, six.string_types) for k in six.iterkeys(data)):
raise ValueError('data keys must be strings')
return True
@require
def validate_row_count(_row_count):
'''
ITable _row_count must be a non-negative integer or None.
'''
if _row_count is None: return True
else: return is_int(_row_count) and _row_count >= 0
@value
def column_names(data):
'''
itbl.column_names is a tuple of the names of the columns of the data table.
'''
return tuple(six.iterkeys(data))
@value
def row_count(data, _row_count):
'''
itbl.row_count is the number of rows in the given datatable itbl.
'''
if len(data) == 0:
return 0
elif _row_count:
return _row_count
elif is_lazy_map(data):
# if data is a lazy map, we look first for a column that isn't lazy:
k = next(data.iternormal(), None)
k = k if k else next(data.itermemoized(), None)
k = k if k else next(data.iterkeys())
return len(data[k])
else:
return len(next(six.itervalues(data), []))
@value
def columns(data, row_count):
'''
itbl.columns is a tuple of the columns in the given datatable itbl. Anything that depends on
columns includes a de-facto check that all columns are the same length.
'''
cols = tuple(v for v in six.itervalues(data))
if not all(len(c) == row_count for c in cols):
raise ValueError('itable columns do not all have identical lengths!')
return cols
@value
def rows(data, row_count, column_names):
'''
        itbl.rows is a tuple of all the persistent maps that make up the rows of the data table.
'''
return tuple([ITableRow(data, column_names, i) for i in range(row_count)])
@value
def dataframe(data):
'''
itbl.dataframe is a pandas dataframe object that is equivalent to the given itable. Note
you must have pandas installed for this to work; an exception will be raised when this
value is requested if you do not.
'''
import pandas
return pandas.DataFrame.from_dict(dict(data))
# Methods
def set(self, k, v):
'''
itbl.set(name, val) yields a new itable object identical to the given itbl except that it
includes the vector val under the given column name.
itbl.set(row, map) updates just the given row to have the properties in the given map; if
this results in a new column being added, it will have the value None for all other rows.
itbl.set(rows, m) allows a sequence of rows to be set by passing rows as either a list or
slice; m may also be a single map or a sequence of maps whose size matches that of rows.
Alternately, m may be an itable whose row-size matches that of rows; in this case new
column names may again be added.
'''
dat = self.data
if isinstance(k, six.string_types):
if isinstance(v, (ITable, colls.Mapping)): v = v[k]
v = self._filter_col(v)
new_data = self.data.set(k, v)
return ITable(new_data, n=len(v))
elif is_int(k):
            # This is an awful slow way to do things
            # (bind the row index and the updates now so the lazy lambdas do not pick up later rebindings)
            row, updates = k, dict(v)
            def _make_lambda(col):
                return lambda: _ndarray_assoc(dat[col], row, updates[col]) if col in updates else dat[col]
            new_map = {col: _make_lambda(col) for col in six.iterkeys(dat)}
            nones = np.full((self.row_count,), None)
            for (vk, vv) in six.iteritems(updates):
                if vk not in new_map:
                    new_map[vk] = _ndarray_assoc(nones, row, vv)
return ITable(lazy_map(new_map), n=self.row_count)
elif not k:
return self
elif isinstance(k[0], six.string_types):
nones = np.full((self.row_count,), None)
newdat = self.data
if isinstance(v, ITable):
                # bind the source table and key now to avoid late-binding closure bugs
                def _make_lambda(tbl, key): return (lambda: self._filter_col(tbl[key]))
                v = lazy_map({kk: _make_lambda(v, kk) for kk in k})
elif not isinstance(v, colls.Mapping):
v = np.asarray(v)
if len(v) == self.row_count and v.shape[1] == len(k): v = v.T
v = {kk:self._filter_col(vv) for (kk,vv) in zip(k,v)}
for kk in six.iterkeys(v):
                # bind the key now so each column reads its own value
                def _make_lambda(key): return (lambda: self._filter_col(v[key]))
newdat = newdat.set(kk, _make_lambda(kk) if kk in v else nones)
return ITable(newdat, n=self.row_count)
else:
(keys, vals) = (k,v)
dat = self.data
nones = np.full((self.row_count,), None)
knones = np.full((len(keys),), None)
if isinstance(vals, (ITable, colls.Mapping)):
def _make_lambda(k):
return lambda:_ndarray_assoc(
dat[k] if k in dat else nones,
keys,
vals[k] if k in vals else knones)
dat = reduce(
lambda m,k: m.set(k, _make_lambda(k)),
six.iteritems(vals.data if isinstance(vals, ITable) else vals),
dat)
else:
def _make_lambda(k): return lambda:np.asarray([v[k] for v in vals])
cols = lazy_map({k:_make_lambda(k) for k in six.iterkeys(vals[0])})
def _make_lambda(k):
return lambda:_ndarray_assoc(
dat[k] if k in dat else nones,
keys,
cols[k])
dat = reduce(
lambda m,k: m.set(k, _make_lambda(k)),
six.iterkeys(vals[0]),
dat)
return ITable(dat, n=self.row_count)
def discard(self, cols):
'''
        itbl.discard(arg) discards either the list of rows, given as integers, or the list of
columns, given as strings.
'''
if not cols: return self
dat = self.data
vecq = is_vector(cols)
if is_str(cols) or (vecq and len(cols) > 0 and is_str(cols[0])):
cols = set(cols if vecq else [cols])
def _make_lambda(k): return lambda:dat[k]
return ITable(lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat) if k not in cols}),
n=self.row_count)
elif isinstance(cols, slice) or is_int(cols) or \
(vecq and len(cols) > 0 and is_int(cols[0])):
def _make_lambda(k): return lambda:np.delete(dat[k], cols, 0)
newdat = lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat)})
return ITable(newdat, n=len(np.delete(np.ones((self.row_count,)), cols, 0)))
elif vecq and len(cols) == 0: return self
else: raise ValueError('ITable.discard requires integers or strings')
def is_lazy(self, k):
'''
itable.is_lazy(k) yields True if k is a lazy value in the given itable, as in a lazy map.
'''
return self.data.is_lazy(k)
def is_memoized(self, k):
'''
itable.is_memoized(k) yields True if k is a memoized value in the given itable, as in a lazy
map.
'''
return self.data.is_memoized(k)
def is_normal(self, k):
'''
itable.is_normal(k) yields True if k is a normal value in the given itable, as in a lazy
map.
'''
return self.data.is_normal(k)
def iterkeys(self):
return self.data.iterkeys()
def iteritems(self):
return self.data.iteritems()
def iterlazy(self):
'''
itable.iterlazy() yields an iterator over the lazy keys only (memoized lazy keys are not
considered lazy).
'''
return self.data.iterlazy()
def itermemoized(self):
'''
        itable.itermemoized() yields an iterator over the memoized keys only (neither unmemoized
lazy keys nor normal keys are considered memoized).
'''
return self.data.itermemoized()
def iternormal(self):
'''
itable.iternormal() yields an iterator over the normal unlazy keys only (memoized lazy keys
are not considered normal).
'''
return self.data.iternormal()
def map(self, f):
'''
itbl.map(f) yields the result of mapping the rows of the given datatable itbl over the
given function f.
'''
if isinstance(f, six.string_types) and f in self.data: return self.data[f]
(args, vargs, kwargs, dflts) = getargspec_py27like(f)
dflts = dflts if dflts else ()
dflts = tuple([None for _ in range(len(args) - len(dflts))]) + dflts
# we have to decide what to map over...
return map(f, self.rows)
def where(self, f):
'''
itbl.where(f) yields the indices for which itbl.map(f) yields True.
'''
return [i for (i,v) in enumerate(self.map(f)) if v]
def select(self, arg):
'''
itbl.select(idcs) yields a sub-table in which only the rows indicated by the given list of
indices are kept.
itbl.select(f) keeps all rows for which the function f yields True.
'''
if isinstance(arg, types.FunctionType):
arg = self.where(arg)
else:
n = len(arg)
if n == self.row_count and set(arg) == set([0,1]):
arg = [i for (i,b) in enumerate(arg) if b]
n = len(arg)
dat = self.data
def _make_lambda(k): return lambda:dat[k][arg]
return ITable(
lazy_map({k:_make_lambda(k) for k in six.iterkeys(dat)}),
n=n)
def merge(self, *args, **kwargs):
'''
itbl.merge(...) yields a copy of the ITable object itbl that has been merged left-to-right
with the given arguments.
'''
return itable(self.data, *args, **kwargs).persist()
def __getitem__(self, rows, cols=Ellipsis):
'''
itbl[row_number] yields the map associated with the given row in the ITable object itbl; the
row_number may alternately be a slice.
itbl[[r1, r2...]] yields a duplicate itable containing only the given rows of itbl.
itbl[column_name] yields the numpy array associated with the given column name.
itbl[[c1, c2...]] yields a duplicate itable containing only the given columns of itbl.
itbl[rows, cols] is equivalent to itbl[rows][cols] (in fact, rows and cols may be given in
any order).
'''
if cols is not Ellipsis: return self[rows][cols]
if is_int(rows):
return self.rows[rows]
elif isinstance(rows, six.string_types):
return self.data[rows]
elif rows is None or len(rows) == 0:
return ITable(ps.m(), n=0)
elif isinstance(rows, slice) or is_int(rows[0]):
n = len(range(rows.start, rows.stop, rows.step)) if isinstance(rows, slice) else \
len(rows)
dat = self.data
def _make_lambda(dat,k): return lambda:dat[k][rows]
return ITable(
lazy_map({k:_make_lambda(dat,k) for k in six.iterkeys(dat)}),
n=n)
else:
rows = set(rows)
dat = self.data
return ITable(
reduce(lambda m,k: m if k in rows else m.remove(k), six.iterkeys(dat), dat),
n=self.row_count)
def __repr__(self):
return 'itable(%s, <%d rows>)' % (self.column_names, self.row_count)
def __iter__(self):
return six.iterkeys(self.data)
def __len__(self):
return len(self.data)
def __contains__(self, k):
return ((0 <= k < self.row_count) if is_int(k) else
(k in self.data) if isinstance(k, six.string_types) else
False)
def iterrows(self):
'''
        itbl.iterrows() iterates over the rows of the given itable itbl.
'''
return iter(self.rows)
def itable(*args, **kwargs):
'''
itable(...) yields a new immutable table object from the given set of arguments. The arguments
may be any number of maps or itables followed by any number of keyword arguments. All the
entries from the arguments and keywords are collapsed left-to-right (respecting laziness),
and the resulting column set is returned as the itable. Arguments and maps may contain
values that are functions of zero arguments; these are considered lazy values and are not
evaluated by the itable function.
'''
# a couple things we want to check first... does our argument list reduce to just an empty
# itable or just a single itable?
if len(args) == 0 and len(kwargs) == 0:
return ITable({}, n=0)
elif len(args) == 1 and len(kwargs) == 0 and isinstance(args[0], ITable):
return args[0]
# we want to try to convert any arguments we can from datatables into maps
try:
import pandas
args = [{k:a[k].values for k in a.keys()} if isinstance(a, pandas.DataFrame) else a
for a in args]
except Exception: pass
# now we want to merge these together and make them one lazy map
m0 = lazy_map(merge(args, kwargs))
# see if we can deduce the row size from a non-lazy argument:
v = m0
for mm in (list(args) + [kwargs]):
if len(mm) == 0: continue
if not is_lazy_map(mm):
for k in six.iterkeys(mm):
try: v = mm[k]
except Exception: continue
break
else:
for k in mm.iternormal():
try: v = mm[k]
except Exception: continue
break
for k in (mm.itermemoized() if v is m0 else []):
try: v = mm[k]
except Exception: continue
break
if v is not m0: break
return ITable(m0, n=(None if v is m0 else len(v)))
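# Illustrative usage sketch (column names and values are made up):
#
#     >>> tbl = itable(a=[1, 2, 3], b=[4.0, 5.0, 6.0])
#     >>> tbl.row_count          # ==> 3
#     >>> tbl['a']               # ==> read-only numpy array for column 'a'
#     >>> tbl[0]                 # ==> row 0, behaves like the mapping {'a': 1, 'b': 4.0}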
def is_itable(arg):
'''
is_itable(x) yields True if x is an ITable object and False otherwise.
'''
return isinstance(arg, ITable)
| gpl-2.0 |
GuessWhoSamFoo/pandas | asv_bench/benchmarks/panel_ctor.py | 2 | 1701 | import warnings
from datetime import datetime, timedelta
from pandas import DataFrame, Panel, date_range
class DifferentIndexes(object):
def setup(self):
self.data_frames = {}
start = datetime(1990, 1, 1)
end = datetime(2012, 1, 1)
for x in range(100):
end += timedelta(days=1)
idx = date_range(start, end)
df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx)
self.data_frames[x] = df
def time_from_dict(self):
with warnings.catch_warnings(record=True):
Panel.from_dict(self.data_frames)
class SameIndexes(object):
def setup(self):
idx = date_range(start=datetime(1990, 1, 1),
end=datetime(2012, 1, 1),
freq='D')
df = DataFrame({'a': 0, 'b': 1, 'c': 2}, index=idx)
self.data_frames = dict(enumerate([df] * 100))
def time_from_dict(self):
with warnings.catch_warnings(record=True):
Panel.from_dict(self.data_frames)
class TwoIndexes(object):
def setup(self):
start = datetime(1990, 1, 1)
end = datetime(2012, 1, 1)
df1 = DataFrame({'a': 0, 'b': 1, 'c': 2},
index=date_range(start=start, end=end, freq='D'))
end += timedelta(days=1)
df2 = DataFrame({'a': 0, 'b': 1, 'c': 2},
index=date_range(start=start, end=end, freq='D'))
dfs = [df1] * 50 + [df2] * 50
self.data_frames = dict(enumerate(dfs))
def time_from_dict(self):
with warnings.catch_warnings(record=True):
Panel.from_dict(self.data_frames)
from .pandas_vb_common import setup # noqa: F401
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/ensemble/__init__.py | 153 | 1382 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
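# Illustrative usage sketch (not part of the module itself); X_train, y_train
# and X_test stand in for user-supplied data:
#
#     >>> from sklearn.ensemble import RandomForestClassifier
#     >>> clf = RandomForestClassifier(n_estimators=100)
#     >>> clf.fit(X_train, y_train)
#     >>> y_pred = clf.predict(X_test)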
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| mit |
stuart-knock/tvb-library | contrib/from_articles/region_deterministic_bnm_wc.py | 5 | 3642 | # -*- coding: utf-8 -*-
"""
What:
Reproduces Figures 23 and 24 of Sanz-Leon P., Knock, S. A., Spiegler, A. and Jirsa V.
Mathematical framework for large-scale brain network modelling in The Virtual Brain.
Neuroimage, 2014, (in review)
Needs:
A working installation of tvb
Run:
python region_deterministic_bnm_wc.py -s True -f True
#Subsequent calls can be made with:
python region_deterministic_bnm_wc.py -f True
.. author:: Paula Sanz-Leon
"""
import numpy
import argparse
from tvb.simulator.lab import *
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 19.42, 12 # that's default image size for this interactive session
pylab.rcParams.update({'font.size': 22})
parser = argparse.ArgumentParser(description='Reproduce results of Figure XX presented in Sanz-Leon et al 2014')
parser.add_argument('-s','--sim', help='Run the simulations', default=False)
parser.add_argument('-f','--fig', help='Plot the figures', default=False)
args = vars(parser.parse_args())
speed = 4.0
simulation_length = 512
oscilator = models.WilsonCowan(c_1 = 16., c_2=12., c_3=15., c_4=3, tau_e=8., tau_i=8., a_e=1.3, a_i=2., theta_e=4., theta_i=3.7)
white_matter = connectivity.Connectivity(load_default=True)
white_matter.speed = numpy.array([speed])
gcs = 8
white_matter_coupling = coupling.Linear(a=2**-gcs)
#Initialise an Integrator
heunint = integrators.HeunDeterministic(dt=2**-4)
#Initialise some Monitors with period in physical time
momo = monitors.Raw()
mama = monitors.TemporalAverage(period=2**-2)
#Bundle them
what_to_watch = (momo, mama)
#Initialise a Simulator -- Model, Connectivity, Integrator, and Monitors.
sim = simulator.Simulator(model = oscilator, connectivity = white_matter,
coupling = white_matter_coupling,
integrator = heunint, monitors = what_to_watch)
sim.configure()
LOG.info("Starting simulation...")
#Perform the simulation
raw_data = []
raw_time = []
tavg_data = []
tavg_time = []
for raw, tavg in sim(simulation_length=simulation_length):
if not raw is None:
raw_time.append(raw[0])
raw_data.append(raw[1])
if not tavg is None:
tavg_time.append(tavg[0])
tavg_data.append(tavg[1])
LOG.info("Finished simulation.")
#Make the lists numpy.arrays for easier use.
RAW = numpy.array(raw_data)
TAVG = numpy.array(tavg_data)
# <codecell>
numpy.save('region_deterministic_bnm_article_wc_raw.npy', RAW)
numpy.save('region_deterministic_bnm_article_wc_rawtime.npy', raw_time)
numpy.save('region_deterministic_bnm_article_wc_tavg.npy', TAVG)
numpy.save('region_deterministic_bnm_article_wc_tavgtime.npy', tavg_time)
if args['fig']:
RAW = numpy.load('region_deterministic_bnm_article_wc_raw.npy')
raw_time = numpy.load('region_deterministic_bnm_article_wc_rawtime.npy')
#Plot temporally averaged time series
figure(1)
subplot(1, 2, 1)
plot(raw_time, RAW[:, 0, :, 0], 'k', alpha=0.042, linewidth=3)
plot(raw_time, RAW[:, 1, :, 0], 'r', alpha=0.042, linewidth=3)
plot(raw_time, RAW[:, 0, :, 0].mean(axis=1), 'k', linewidth=3)
plot(raw_time, RAW[:, 1, :, 0].mean(axis=1), 'r', linewidth=3)
xlabel('time[ms]')
#ylim([-25, 5])
xlim([0, sim.simulation_length])
subplot(1, 2, 2)
plot(RAW[:, 0, :, 0], RAW[:, 1, :, 0], alpha=0.042)
plot(RAW[:, 0, :, 0].mean(axis=1), RAW[:, 1, :, 0].mean(axis=1), alpha=1.)
plot(RAW[0, 0, :, 0], RAW[0, 1, :, 0], 'bo', alpha=0.15)
xlabel(r'$E$')
ylabel(r'$I$')
show()
fig_name = 'wc_default_speed_' + str(int(white_matter.speed)) + '_gcs_2**-' + str(gcs) + '.pdf'
savefig(fig_name)
###EoF### | gpl-2.0 |
mkocka/galaxytea | integral/domcek/numerical_integration.py | 1 | 3477 | #!/usr/bin/env python
from __future__ import division
import matplotlib.pyplot as plt
import matplotlib
from scipy import constants as const
from pylab import *
#
def planck(T,lambda_i,lambda_f,step=10**(-1)):
    x ,y = ([] for i in range(2))
    for i in arange(lambda_i,lambda_f,step): # i*10**(-9) - correction to nm
        f = 10**(-9)*(2*const.h*const.c**2)/(((i*10**(-9))**5) *(exp((const.c*const.h)/((i*10**(-9))*const.k*T))-1))
        y.append(f)
        x.append(i)
    return x,y
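# planck() evaluates Planck's law per unit wavelength,
#   B_lambda(T) = (2*h*c**2/lambda**5) * 1/(exp(h*c/(lambda*k*T)) - 1),
# with lambda converted from nm to m and the result rescaled by 1e-9 to per-nm units.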
def plot_f(x,y): #plots one planck function
plt.plot(x,y)
plt.xlabel('$\lambda$ [nm]')
plt.ylabel('Spectral radiance [W.sr$^{-1}$.m$^{-}$.nm$^{-1}$')
plt.grid()
plt.savefig("planck")
plt.gcf().clear()
#first 3 parameters characterize range of temperatures and step between them
#other 2 parameters characterize range of planck function
def plot_multiple(T_i,T_f,step,lambda_i,lambda_f): #plots multiple planck into one graph
for i in range(T_i,T_f,step):
a, b = planck(i,lambda_i,lambda_f)
plt.plot(a,b)
plt.xlabel('$\lambda$ [nm]')
plt.ylabel('Spectral radiance [W.sr$^{-1}$.m$^{-}$.nm$^{-1}$')
plt.grid()
plt.savefig("planck_functions")
plt.gcf().clear()
def midpoint_rule(a,b): #rectangle rule (right-endpoint Riemann sum)
    dx = a[1]-a[0]
    summation=0
    for i in range(1,len(a)): #first value excluded
        summation += dx*b[i]
    return summation
def trapezoid_rule(a,b):
    dx = a[1]-a[0]
    #endpoints enter with weight dx/2, interior points with weight dx
    summation = dx*(0.5*b[0]+0.5*b[len(b)-1])
    for i in range(1,len(a)-1): #first and last value excluded
        summation += dx*b[i]
    return summation
def simpson(a,b):
    dx = a[1]-a[0]
    if (len(a)%2==0): # this rule needs an odd number of values
        a.pop()
        b.pop()
    #composite Simpson: endpoints weight 1, odd indices weight 4, even interior indices weight 2
    summation = dx*(b[0]+b[len(b)-1])/3
    for i in range(1,len(a)-1):
        if i%2==1: summation += 4*b[i]*dx/3
        else: summation += 2*b[i]*dx/3
    return summation
plot_multiple(5000,10000,1000,10,2000)
#defining boundries of calculating function
lambda_initial=10
lambda_final=10000
#temperature of blackbody
temperature=5778
#calculating planck function
a, b = planck(temperature,lambda_initial,lambda_final)
plot_f(a,b)
#theoretical Stefan-Boltzmann value
stef_boltz = const.sigma*temperature**4
print 30*"-"
print "Planck function parameters"
print 30*"-"
print "Temperature:", temperature
print "Range of integration in nm: (", lambda_initial, ",", lambda_final,")"
print
print "Calculated values:"
print 30*"-"
#integration gives us value dependent on sr**-1, need to mutliply by appropriate constant (pi)
#for more information: http://en.wikipedia.org/wiki/Planck's_law#Stefan.E2.80.93Boltzmann_law
print "Stefan-Boltzmann law:", stef_boltz
print "Midpoint rule method:", midpoint_rule(a,b)*const.pi, "Sigma:", midpoint_rule(a,b)*const.pi/(temperature**4)
print "Trapezoid rule method:", trapezoid_rule(a,b)*const.pi, "Sigma:", trapezoid_rule(a,b)*const.pi/(temperature**4)
print "Simpson's rule method:", simpson(a,b)*const.pi, "Sigma:", simpson(a,b)*const.pi/(temperature**4)
print "Theoretical Sigma:", ((const.pi)**2*(const.k)**4)/(60*((const.h)/(2*const.pi))**3*(const.c)**2)
print
print "Relative errors"
print 30*"-"
print "Midpoint rule method:", (stef_boltz - midpoint_rule(a,b)*const.pi) / stef_boltz
print "Trapezoid rule method:", (stef_boltz - trapezoid_rule(a,b)*const.pi) / stef_boltz
print "Simpson's rule method:", (stef_boltz - simpson(a,b)*const.pi) / stef_boltz
print midpoint_rule(a,b)*const.pi / stef_boltz
print trapezoid_rule(a,b)*const.pi / stef_boltz
print simpson(a,b)*const.pi / stef_boltz
| mit |
tillraab/thunderfish | setup.py | 3 | 2124 | from setuptools import setup, find_packages
exec(open('thunderfish/version.py').read())
long_description = """
# ThunderFish
Algorithms and programs for analysing electric field recordings of
weakly electric fish.
[Documentation](https://bendalab.github.io/thunderfish) |
[API Reference](https://bendalab.github.io/thunderfish/api)
Weakly electric fish generate an electric organ discharge (EOD). In
wave-type fish the EOD resembles a sinewave of a specific frequency
and with higher harmonics. In pulse-type fish EODs have a distinct
waveform and are separated in time. The thunderfish package provides
algorithms and tools for analysing both wavefish and pulsefish EODs.
"""
setup(
name = 'thunderfish',
version = __version__,
author = 'Jan Benda, Juan F. Sehuanes, Till Raab, Jörg Henninger, Jan Grewe, Fabian Sinz, Liz Weerdmeester',
author_email = "[email protected]",
description = 'Algorithms and scripts for analyzing recordings of electric fish waveforms.',
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/bendalab/thunderfish",
license = "GPLv3",
classifiers = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages = find_packages(exclude = ['contrib', 'docs', 'tests*']),
entry_points = {
'console_scripts': [
'thunderfish = thunderfish.thunderfish:main',
'fishfinder = thunderfish.fishfinder:main',
'collectfish = thunderfish.collectfish:main',
'eodexplorer = thunderfish.eodexplorer:main',
]},
python_requires = '>=3.4',
install_requires = ['sklearn', 'scipy', 'numpy', 'matplotlib', 'audioio'],
)
| gpl-3.0 |
julienmalard/Tikon | tikon/datos/obs.py | 1 | 2572 | import numpy as np
import pandas as pd
import xarray as xr
from tikon.utils import EJE_TIEMPO, EJE_PARC
class Obs(object):
def __init__(símismo, datos):
símismo.datos = datos
def fechas(símismo):
tiempos = símismo.datos[EJE_TIEMPO].values
if np.issubdtype(tiempos.dtype, np.datetime64):
return (pd.Timestamp(tiempos.min()), pd.Timestamp(tiempos.max())), 0
else:
return (None, None), tiempos.max()
def proc_res(símismo, res):
if not np.issubdtype(símismo.datos[EJE_TIEMPO].values.dtype, np.datetime64):
res = res.copy()
res[EJE_TIEMPO] = np.array(
[x.days for x in pd.to_datetime(res[EJE_TIEMPO].values) - pd.to_datetime(res[EJE_TIEMPO].values[0])]
)
return res
@property
def mód(símismo):
raise NotImplementedError
@property
def var(símismo):
raise NotImplementedError
@classmethod
def de_cuadro(cls, datos_pd, corresp, eje_principal, parc=None, tiempo=None, coords=None, factor=1, **argsll):
if isinstance(datos_pd, str):
datos_pd = pd.read_csv(datos_pd, encoding='utf8')
corresp = corresp or {}
for ll, v in corresp.items():
if isinstance(v, list):
corresp[ll] = tuple(v)
coords = {
EJE_PARC: parc or EJE_PARC,
EJE_TIEMPO: tiempo or EJE_TIEMPO,
**(coords or {})
}
coords_xr = coords.copy()
for dim, crd in coords.items():
if isinstance(dim, str) and crd in datos_pd.columns:
coords_xr[dim] = datos_pd[crd].unique()
else:
coords_xr[dim] = [crd]
coords_xr[eje_principal] = list(corresp.values())
datos = xr.DataArray(np.nan, coords=coords_xr, dims=list(coords_xr))
for f in datos_pd.iterrows():
d = f[1]
índs = {
**{dim: d[vl] if isinstance(vl, str) and vl in d else vl for dim, vl in coords.items()},
**{eje_principal: [corresp[x] for x in list(d.axes[0]) if x in corresp]}
}
vals = d[[x for x in list(d.axes[0]) if x in corresp]]
datos.loc[índs] = vals * factor
datos.coords[EJE_PARC] = [str(prc) for prc in datos.coords[EJE_PARC].values]
return cls(datos)
def __contains__(símismo, itema):
coords = {ll: v.values for ll, v in símismo.datos.coords.items()}
return all(ll in coords and v in coords[ll] for ll, v in itema.items())
| agpl-3.0 |
JohnWinter/ThinkStats2 | code/survival.py | 65 | 17881 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import numpy as np
import pandas
import nsfg
import thinkstats2
import thinkplot
"""
Outcome codes from http://www.icpsr.umich.edu/nsfg6/Controller?
displayPage=labelDetails&fileCode=PREG§ion=&subSec=8016&srtLabel=611932
1 LIVE BIRTH 9148
2 INDUCED ABORTION 1862
3 STILLBIRTH 120
4 MISCARRIAGE 1921
5 ECTOPIC PREGNANCY 190
6 CURRENT PREGNANCY 352
"""
FORMATS = ['pdf', 'eps', 'png']
class SurvivalFunction(object):
"""Represents a survival function."""
def __init__(self, cdf, label=''):
self.cdf = cdf
self.label = label or cdf.label
@property
def ts(self):
return self.cdf.xs
@property
def ss(self):
return 1 - self.cdf.ps
def __getitem__(self, t):
return self.Prob(t)
def Prob(self, t):
"""Returns S(t), the probability that corresponds to value t.
t: time
returns: float probability
"""
return 1 - self.cdf.Prob(t)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Mean(self):
"""Mean survival time."""
return self.cdf.Mean()
def Items(self):
"""Sorted list of (t, s) pairs."""
return zip(self.ts, self.ss)
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, survival function)
"""
return self.ts, self.ss
def MakeHazard(self, label=''):
"""Computes the hazard function.
sf: survival function
returns: Pmf that maps times to hazard rates
"""
ss = self.ss
lams = {}
for i, t in enumerate(self.ts[:-1]):
hazard = (ss[i] - ss[i+1]) / ss[i]
lams[t] = hazard
return HazardFunction(lams, label=label)
def MakePmf(self, filler=None):
"""Makes a PMF of lifetimes.
filler: value to replace missing values
returns: Pmf
"""
pmf = thinkstats2.Pmf()
for val, prob in self.cdf.Items():
pmf.Set(val, prob)
cutoff = self.cdf.ps[-1]
if filler is not None:
pmf[filler] = 1-cutoff
return pmf
def RemainingLifetime(self, filler=None, func=thinkstats2.Pmf.Mean):
"""Computes remaining lifetime as a function of age.
        func: function from conditional Pmf to expected lifetime
returns: Series that maps from age to remaining lifetime
"""
pmf = self.MakePmf(filler=filler)
d = {}
for t in sorted(pmf.Values())[:-1]:
pmf[t] = 0
pmf.Normalize()
d[t] = func(pmf) - t
#print(t, d[t])
return pandas.Series(d)
class HazardFunction(object):
"""Represents a hazard function."""
def __init__(self, d, label=''):
"""Initialize the hazard function.
d: dictionary (or anything that can initialize a series)
label: string
"""
self.series = pandas.Series(d)
self.label = label
def __getitem__(self, t):
return self.series[t]
def Render(self):
"""Generates a sequence of points suitable for plotting.
returns: tuple of (sorted times, hazard function)
"""
return self.series.index, self.series.values
def MakeSurvival(self, label=''):
"""Makes the survival function.
returns: SurvivalFunction
"""
ts = self.series.index
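        # survival is the running product of the complements of the hazards:
        # S(t) = prod over observed times t_i <= t of (1 - lambda(t_i))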
ss = (1 - self.series).cumprod()
cdf = thinkstats2.Cdf(ts, 1-ss)
sf = SurvivalFunction(cdf, label=label)
return sf
def Extend(self, other):
"""Extends this hazard function by copying the tail from another.
other: HazardFunction
"""
last = self.series.index[-1]
more = other.series[other.series.index > last]
self.series = pandas.concat([self.series, more])
def ConditionalSurvival(pmf, t0):
"""Computes conditional survival function.
Probability that duration exceeds t0+t, given that
duration >= t0.
pmf: Pmf of durations
t0: minimum time
returns: tuple of (ts, conditional survivals)
"""
cond = thinkstats2.Pmf()
for t, p in pmf.Items():
if t >= t0:
cond.Set(t-t0, p)
return SurvivalFunction(thinkstats2.Cdf(cond))
def PlotConditionalSurvival(durations):
"""Plots conditional survival curves for a range of t0.
durations: list of durations
"""
pmf = thinkstats2.Pmf(durations)
times = [8, 16, 24, 32]
thinkplot.PrePlot(len(times))
for t0 in times:
sf = ConditionalSurvival(pmf, t0)
label = 't0=%d' % t0
thinkplot.Plot(sf, label=label)
thinkplot.Show()
def PlotSurvival(complete):
"""Plots survival and hazard curves.
complete: list of complete lifetimes
"""
thinkplot.PrePlot(3, rows=2)
cdf = thinkstats2.Cdf(complete, label='cdf')
sf = SurvivalFunction(cdf, label='survival')
print(cdf[13])
print(sf[13])
thinkplot.Plot(sf)
thinkplot.Cdf(cdf, alpha=0.2)
thinkplot.Config()
thinkplot.SubPlot(2)
hf = sf.MakeHazard(label='hazard')
print(hf[39])
thinkplot.Plot(hf)
thinkplot.Config(ylim=[0, 0.75])
def PlotHazard(complete, ongoing):
"""Plots the hazard function and survival function.
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
"""
# plot S(t) based on only complete pregnancies
cdf = thinkstats2.Cdf(complete)
sf = SurvivalFunction(cdf)
thinkplot.Plot(sf, label='old S(t)', alpha=0.1)
thinkplot.PrePlot(2)
# plot the hazard function
hf = EstimateHazardFunction(complete, ongoing)
thinkplot.Plot(hf, label='lams(t)', alpha=0.5)
# plot the survival function
sf = hf.MakeSurvival()
thinkplot.Plot(sf, label='S(t)')
thinkplot.Show(xlabel='t (weeks)')
def EstimateHazardFunction(complete, ongoing, label='', shift=1e-7):
"""Estimates the hazard function by Kaplan-Meier.
http://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator
complete: list of complete lifetimes
ongoing: list of ongoing lifetimes
label: string
shift: presumed additional survival of ongoing
"""
# pmf and sf of complete lifetimes
n = len(complete)
hist_complete = thinkstats2.Hist(complete)
sf_complete = SurvivalFunction(thinkstats2.Cdf(complete))
# sf for ongoing lifetimes
# The shift is a regrettable hack needed to deal with simultaneity.
# If a case is complete at some t and another case is ongoing
# at t, we presume that the ongoing case exceeds t+shift.
m = len(ongoing)
cdf = thinkstats2.Cdf(ongoing).Shift(shift)
sf_ongoing = SurvivalFunction(cdf)
lams = {}
for t, ended in sorted(hist_complete.Items()):
at_risk = ended + n * sf_complete[t] + m * sf_ongoing[t]
lams[t] = ended / at_risk
#print(t, ended, n * sf_complete[t], m * sf_ongoing[t], at_risk)
return HazardFunction(lams, label=label)
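# Illustrative sketch: a minimal use of EstimateHazardFunction on toy data.
# _example_kaplan_meier is a hypothetical helper and the numbers are made up;
# it assumes only the functions already defined in this module.
def _example_kaplan_meier():
    """Estimates hazard and survival curves from toy lifetimes."""
    complete = [1, 2, 2, 3]   # fully observed lifetimes
    ongoing = [2, 3]          # censored lifetimes (still ongoing)
    hf = EstimateHazardFunction(complete, ongoing)
    sf = hf.MakeSurvival()
    return hf, sf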
def CleanData(resp):
"""Cleans a respondent DataFrame.
resp: DataFrame of respondents
"""
resp.cmmarrhx.replace([9997, 9998, 9999], np.nan, inplace=True)
resp['agemarry'] = (resp.cmmarrhx - resp.cmbirth) / 12.0
resp['age'] = (resp.cmintvw - resp.cmbirth) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def AddLabelsByDecade(groups, **options):
"""Draws fake points in order to add labels to the legend.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, _ in groups:
label = '%d0s' % name
thinkplot.Plot([15], [1], label=label, **options)
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for _, group in groups:
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def PlotPredictionsByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
hfs = []
for _, group in groups:
hf, sf = EstimateSurvival(group)
hfs.append(hf)
thinkplot.PrePlot(len(hfs))
for i, hf in enumerate(hfs):
if i > 0:
hf.Extend(hfs[i-1])
sf = hf.MakeSurvival()
thinkplot.Plot(sf, **options)
def ResampleSurvival(resp, iters=101):
"""Resamples respondents and estimates the survival function.
resp: DataFrame of respondents
iters: number of resamples
"""
_, sf = EstimateSurvival(resp)
thinkplot.Plot(sf)
low, high = resp.agemarry.min(), resp.agemarry.max()
ts = np.arange(low, high, 1/12.0)
ss_seq = []
for _ in range(iters):
sample = thinkstats2.ResampleRowsWeighted(resp)
_, sf = EstimateSurvival(sample)
ss_seq.append(sf.Probs(ts))
low, high = thinkstats2.PercentileRows(ss_seq, [5, 95])
thinkplot.FillBetween(ts, low, high, color='gray', label='90% CI')
thinkplot.Save(root='survival3',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[12, 46],
ylim=[0, 1],
formats=FORMATS)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.evrmarry == 1].agemarry
ongoing = resp[resp.evrmarry == 0].age
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
def PlotMarriageData(resp):
"""Plots hazard and survival functions.
resp: DataFrame of respondents
"""
hf, sf = EstimateSurvival(resp)
thinkplot.PrePlot(rows=2)
thinkplot.Plot(hf)
thinkplot.Config(legend=False)
thinkplot.SubPlot(2)
thinkplot.Plot(sf)
thinkplot.Save(root='survival2',
xlabel='age (years)',
ylabel='prob unmarried',
ylim=[0, 1],
legend=False,
formats=FORMATS)
return sf
def PlotPregnancyData(preg):
"""Plots survival and hazard curves based on pregnancy lengths.
    preg: DataFrame of pregnancies
"""
complete = preg.query('outcome in [1, 3, 4]').prglngth
print('Number of complete pregnancies', len(complete))
ongoing = preg[preg.outcome == 6].prglngth
print('Number of ongoing pregnancies', len(ongoing))
PlotSurvival(complete)
thinkplot.Save(root='survival1',
xlabel='t (weeks)',
formats=FORMATS)
hf = EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return sf
def PlotRemainingLifetime(sf1, sf2):
"""Plots remaining lifetimes for pregnancy and age at first marriage.
sf1: SurvivalFunction for pregnancy length
sf2: SurvivalFunction for age at first marriage
"""
thinkplot.PrePlot(cols=2)
rem_life1 = sf1.RemainingLifetime()
thinkplot.Plot(rem_life1)
thinkplot.Config(title='pregnancy length',
xlabel='weeks',
ylabel='mean remaining weeks')
thinkplot.SubPlot(2)
func = lambda pmf: pmf.Percentile(50)
rem_life2 = sf2.RemainingLifetime(filler=np.inf, func=func)
thinkplot.Plot(rem_life2)
thinkplot.Config(title='age at first marriage',
ylim=[0, 15],
xlim=[11, 31],
xlabel='age (years)',
ylabel='median remaining years')
thinkplot.Save(root='survival6',
formats=FORMATS)
def ReadFemResp(dct_file='2002FemResp.dct',
dat_file='2002FemResp.dat.gz',
**options):
"""Reads the NSFG respondent data.
dct_file: string file name
dat_file: string file name
returns: DataFrame
"""
dct = thinkstats2.ReadStataDct(dct_file, encoding='iso-8859-1')
df = dct.ReadFixedWidth(dat_file, compression='gzip', **options)
CleanData(df)
return df
def ReadFemResp2002():
"""Reads respondent data from NSFG Cycle 6.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'finalwgt']
resp = ReadFemResp(usecols=usecols)
CleanData(resp)
return resp
def ReadFemResp2010():
"""Reads respondent data from NSFG Cycle 7.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgtq1q16']
resp = ReadFemResp('2006_2010_FemRespSetup.dct',
'2006_2010_FemResp.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgtq1q16
CleanData(resp)
return resp
def ReadFemResp2013():
"""Reads respondent data from NSFG Cycle 8.
returns: DataFrame
"""
usecols = ['cmmarrhx', 'cmdivorcx', 'cmbirth', 'cmintvw',
'evrmarry', 'wgt2011_2013']
resp = ReadFemResp('2011_2013_FemRespSetup.dct',
'2011_2013_FemRespData.dat.gz',
usecols=usecols)
resp['finalwgt'] = resp.wgt2011_2013
CleanData(resp)
return resp
def ReadFemResp1995():
"""Reads respondent data from NSFG Cycle 5.
returns: DataFrame
"""
dat_file = '1995FemRespData.dat.gz'
names = ['a_doi', 'timesmar', 'mardat01', 'bdaycenm', 'post_wt']
colspecs = [(12359, 12363),
(3538, 3540),
(11758, 11762),
(13, 16),
(12349, 12359)]
df = pandas.read_fwf(dat_file,
compression='gzip',
colspecs=colspecs,
names=names)
df['cmmarrhx'] = df.mardat01
df['cmbirth'] = df.bdaycenm
df['cmintvw'] = df.a_doi
df['finalwgt'] = df.post_wt
df.timesmar.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.timesmar > 0).astype(int)
CleanData(df)
return df
def ReadFemResp1982():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1982NSFGData.dat.gz'
names = ['cmmarrhx', 'MARNO', 'cmintvw', 'cmbirth', 'finalwgt']
#actual = ['MARIMO', 'MARNO', 'TL', 'TL', 'W5']
colspecs = [(1028, 1031),
(1258, 1259),
(841, 844),
(12, 15),
(976, 982)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
df.MARNO.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.MARNO > 0).astype(int)
CleanData(df)
return df[:7969]
def ReadFemResp1988():
"""Reads respondent data from NSFG Cycle 4.
returns: DataFrame
"""
dat_file = '1988FemRespData.dat.gz'
names = ['F_13'] #['CMOIMO', 'F_13', 'F19M1MO', 'A_3']
# colspecs = [(799, 803)],
colspecs = [(20, 22)]#,
# (1538, 1542),
# (26, 30),
# (2568, 2574)]
df = pandas.read_fwf(dat_file, compression='gzip', colspecs=colspecs, names=names)
# df['cmmarrhx'] = df.F19M1MO
# df['cmbirth'] = df.A_3
# df['cmintvw'] = df.CMOIMO
# df['finalwgt'] = df.W5
df.F_13.replace([98, 99], np.nan, inplace=True)
df['evrmarry'] = (df.F_13 > 0).astype(int)
# CleanData(df)
return df
def PlotResampledByDecade(resps, iters=11, predict_flag=False, omit=None):
"""Plots survival curves for resampled data.
resps: list of DataFrames
iters: number of resamples to plot
predict_flag: whether to also plot predictions
"""
for i in range(iters):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if omit:
groups = [(name, group) for name, group in groups
if name not in omit]
# TODO: refactor this to collect resampled estimates and
# plot shaded areas
if i == 0:
AddLabelsByDecade(groups, alpha=0.7)
if predict_flag:
PlotPredictionsByDecade(groups, alpha=0.1)
EstimateSurvivalByDecade(groups, alpha=0.1)
else:
EstimateSurvivalByDecade(groups, alpha=0.2)
def main():
thinkstats2.RandomSeed(17)
preg = nsfg.ReadFemPreg()
sf1 = PlotPregnancyData(preg)
# make the plots based on Cycle 6
resp6 = ReadFemResp2002()
sf2 = PlotMarriageData(resp6)
ResampleSurvival(resp6)
PlotRemainingLifetime(sf1, sf2)
# read Cycles 5 and 7
resp5 = ReadFemResp1995()
resp7 = ReadFemResp2010()
# plot resampled survival functions by decade
resps = [resp5, resp6, resp7]
PlotResampledByDecade(resps)
thinkplot.Save(root='survival4',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
# plot resampled survival functions by decade, with predictions
PlotResampledByDecade(resps, predict_flag=True, omit=[5])
thinkplot.Save(root='survival5',
xlabel='age (years)',
ylabel='prob unmarried',
xlim=[13, 45],
ylim=[0, 1],
formats=FORMATS)
if __name__ == '__main__':
main()
| gpl-3.0 |
mhdella/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
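# Note: with a linear kernel K = X X^T, KernelRidge solves the dual problem
# dual_coef = (K + alpha*I)^{-1} y and predicts K @ dual_coef, which is
# algebraically the same fit as Ridge(fit_intercept=False) in the primal form,
# hence the near-exact agreement asserted above.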
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
bavardage/statsmodels | statsmodels/sandbox/examples/ex_gam_results.py | 37 | 1660 | # -*- coding: utf-8 -*-
"""Example results for GAM from tests
Created on Mon Nov 07 13:13:15 2011
Author: Josef Perktold
The example is loaded from a test module. The test still fails but the
results look relatively good.
I don't know yet why there is the small difference and why GAM doesn't
converge in this case
"""
from statsmodels.sandbox.tests.test_gam import _estGAMGaussianLogLink
tt = _estGAMGaussianLogLink()
comp, const = tt.res_gam.smoothed_demeaned(tt.mod_gam.exog)
comp_glm_ = tt.res2.model.exog * tt.res2.params
comp1 = comp_glm_[:,1:4].sum(1)
mean1 = comp1.mean()
comp1 -= mean1
comp2 = comp_glm_[:,4:].sum(1)
mean2 = comp2.mean()
comp2 -= mean2
comp1_true = tt.res2.model.exog[:,1:4].sum(1)
mean1 = comp1_true.mean()
comp1_true -= mean1
comp2_true = tt.res2.model.exog[:,4:].sum(1)
mean2 = comp2_true.mean()
comp2_true -= mean2
noise = tt.res2.model.endog - tt.mu_true
noise_eta = tt.family.link(tt.res2.model.endog) - tt.y_true
import matplotlib.pyplot as plt
plt.figure()
plt.plot(noise, 'k.')
plt.figure()
plt.plot(comp, 'r-')
plt.plot(comp1, 'b-')
plt.plot(comp2, 'b-')
plt.plot(comp1_true, 'k--', lw=2)
plt.plot(comp2_true, 'k--', lw=2)
#the next doesn't make sense - non-linear
#c1 = tt.family.link(tt.family.link.inverse(comp1_true) + noise)
#c2 = tt.family.link(tt.family.link.inverse(comp2_true) + noise)
#not nice in example/plot: noise variance is constant not proportional
plt.plot(comp1_true + noise_eta, 'g.', alpha=0.95)
plt.plot(comp2_true + noise_eta, 'r.', alpha=0.95)
#plt.plot(c1, 'g.', alpha=0.95)
#plt.plot(c2, 'r.', alpha=0.95)
plt.title('Gaussian loglink, GAM (red), GLM (blue), true (black)')
plt.show()
| bsd-3-clause |
drammock/mne-python | mne/conftest.py | 1 | 23095 | # -*- coding: utf-8 -*-
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
from distutils.version import LooseVersion
import gc
import os
import os.path as op
from pathlib import Path
import shutil
import sys
import warnings
import pytest
import numpy as np
import mne
from mne.datasets import testing
from mne.fixes import has_numba
from mne.stats import cluster_level
from mne.utils import _pl, _assert_no_instances, numerics
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
bem_path = op.join(test_path, 'subjects', 'sample', 'bem')
fname_bem = op.join(bem_path, 'sample-1280-bem.fif')
fname_aseg = op.join(test_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
subjects_dir = op.join(test_path, 'subjects')
fname_src = op.join(bem_path, 'sample-oct-4-src.fif')
subjects_dir = op.join(test_path, 'subjects')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif')
collect_ignore = ['export/_eeglab.py']
def pytest_configure(config):
"""Configure pytest options."""
# Markers
for marker in ('slowtest', 'ultraslowtest'):
config.addinivalue_line('markers', marker)
# Fixtures
for fixture in ('matplotlib_config',):
config.addinivalue_line('usefixtures', fixture)
# Warnings
# - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
# we should remove them from here.
# - This list should also be considered alongside reset_warnings in
# doc/conf.py.
warning_lines = r"""
error::
ignore:.*deprecated and ignored since IPython.*:DeprecationWarning
ignore::ImportWarning
ignore:the matrix subclass:PendingDeprecationWarning
ignore:numpy.dtype size changed:RuntimeWarning
ignore:.*HasTraits.trait_.*:DeprecationWarning
ignore:.*takes no parameters:DeprecationWarning
ignore:joblib not installed:RuntimeWarning
ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning
ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning
ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning
ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning
ignore:The oldnumeric module will be dropped:DeprecationWarning
ignore:Collection picker None could not be converted to float:UserWarning
ignore:covariance is not positive-semidefinite:RuntimeWarning
ignore:Can only plot ICA components:RuntimeWarning
ignore:Matplotlib is building the font cache using fc-list:UserWarning
ignore:Using or importing the ABCs from 'collections':DeprecationWarning
ignore:`formatargspec` is deprecated:DeprecationWarning
# This is only necessary until sklearn updates their wheels for NumPy 1.16
ignore:numpy.ufunc size changed:RuntimeWarning
ignore:.*mne-realtime.*:DeprecationWarning
ignore:.*imp.*:DeprecationWarning
ignore:Exception creating Regex for oneOf.*:SyntaxWarning
ignore:scipy\.gradient is deprecated.*:DeprecationWarning
ignore:sklearn\.externals\.joblib is deprecated.*:FutureWarning
ignore:The sklearn.*module.*deprecated.*:FutureWarning
ignore:.*trait.*handler.*deprecated.*:DeprecationWarning
ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning
ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning
ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning
ignore:.*`np.long` is a deprecated alias:DeprecationWarning
ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning
ignore:.*sphinx\.util\.smartypants is deprecated.*:
ignore:.*pandas\.util\.testing is deprecated.*:
ignore:.*tostring.*is deprecated.*:DeprecationWarning
ignore:.*QDesktopWidget\.availableGeometry.*:DeprecationWarning
ignore:Unable to enable faulthandler.*:UserWarning
ignore:Fetchers from the nilearn.*:FutureWarning
ignore:SelectableGroups dict interface is deprecated\. Use select\.:DeprecationWarning
ignore:Call to deprecated class vtk.*:DeprecationWarning
ignore:Call to deprecated method.*Deprecated since.*:DeprecationWarning
always:.*get_data.* is deprecated in favor of.*:DeprecationWarning
always::ResourceWarning
""" # noqa: E501
for warning_line in warning_lines.split('\n'):
warning_line = warning_line.strip()
if warning_line and not warning_line.startswith('#'):
config.addinivalue_line('filterwarnings', warning_line)
# Have to be careful with autouse=True, but this is just an int comparison
# so it shouldn't really add appreciable overhead
@pytest.fixture(autouse=True)
def check_verbose(request):
"""Set to the default logging level to ensure it's tested properly."""
starting_level = mne.utils.logger.level
yield
# ensures that no tests break the global state
try:
assert mne.utils.logger.level == starting_level
except AssertionError:
pytest.fail('.'.join([request.module.__name__,
request.function.__name__]) +
' modifies logger.level')
@pytest.fixture(autouse=True)
def close_all():
"""Close all matplotlib plots, regardless of test status."""
# This adds < 1 µS in local testing, and we have ~2500 tests, so ~2 ms max
import matplotlib.pyplot as plt
yield
plt.close('all')
@pytest.fixture(autouse=True)
def add_mne(doctest_namespace):
"""Add mne to the namespace."""
doctest_namespace["mne"] = mne
@pytest.fixture(scope='function')
def verbose_debug():
"""Run a test with debug verbosity."""
with mne.utils.use_log_level('debug'):
yield
@pytest.fixture(scope='session')
def matplotlib_config():
"""Configure matplotlib for viz tests."""
import matplotlib
from matplotlib import cbook
# Allow for easy interactive debugging with a call like:
#
# $ MNE_MPL_TESTING_BACKEND=Qt5Agg pytest mne/viz/tests/test_raw.py -k annotation -x --pdb # noqa: E501
#
try:
want = os.environ['MNE_MPL_TESTING_BACKEND']
except KeyError:
want = 'agg' # don't pop up windows
with warnings.catch_warnings(record=True): # ignore warning
warnings.filterwarnings('ignore')
matplotlib.use(want, force=True)
import matplotlib.pyplot as plt
assert plt.get_backend() == want
# overwrite some params that can horribly slow down tests that
# users might have changed locally (but should not otherwise affect
# functionality)
plt.ioff()
plt.rcParams['figure.dpi'] = 100
try:
from traits.etsconfig.api import ETSConfig
except Exception:
pass
else:
ETSConfig.toolkit = 'qt4'
# Make sure that we always reraise exceptions in handlers
orig = cbook.CallbackRegistry
class CallbackRegistryReraise(orig):
def __init__(self, exception_handler=None):
args = ()
if LooseVersion(matplotlib.__version__) >= LooseVersion('2.1'):
args += (exception_handler,)
super(CallbackRegistryReraise, self).__init__(*args)
cbook.CallbackRegistry = CallbackRegistryReraise
@pytest.fixture(scope='session')
def ci_macos():
"""Determine if running on MacOS CI."""
return (os.getenv('CI', 'false').lower() == 'true' and
sys.platform == 'darwin')
@pytest.fixture(scope='session')
def azure_windows():
"""Determine if running on Azure Windows."""
return (os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true' and
sys.platform.startswith('win'))
@pytest.fixture()
def check_gui_ci(ci_macos, azure_windows):
"""Skip tests that are not reliable on CIs."""
if azure_windows or ci_macos:
pytest.skip('Skipping GUI tests on MacOS CIs and Azure Windows')
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked():
# This one is session scoped, so be sure not to modify it (use evoked
# instead)
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
baseline=(None, 0))
evoked.crop(0, 0.2)
return evoked
@pytest.fixture()
def evoked(_evoked):
"""Get evoked data."""
return _evoked.copy()
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def noise_cov():
"""Get a noise cov from the testing dataset."""
return mne.read_cov(fname_cov)
@pytest.fixture(scope='function')
def bias_params_free(evoked, noise_cov):
"""Provide inputs for free bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
return _bias_params(evoked, noise_cov, fwd)
@pytest.fixture(scope='function')
def bias_params_fixed(evoked, noise_cov):
"""Provide inputs for fixed bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
mne.convert_forward_solution(
fwd, force_fixed=True, surf_ori=True, copy=False)
return _bias_params(evoked, noise_cov, fwd)
def _bias_params(evoked, noise_cov, fwd):
evoked.pick_types(meg=True, eeg=True, exclude=())
# restrict to limited set of verts (small src here) and one hemi for speed
vertices = [fwd['src'][0]['vertno'].copy(), []]
stc = mne.SourceEstimate(
np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1)
fwd = mne.forward.restrict_forward_to_stc(fwd, stc)
assert fwd['sol']['row_names'] == noise_cov['names']
assert noise_cov['names'] == evoked.ch_names
evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)
data_cov = noise_cov.copy()
data = fwd['sol']['data'] @ fwd['sol']['data'].T
data *= 1e-14 # 100 nAm at each source, effectively (1e-18 would be 1 nAm)
# This is rank-deficient, so let's make it actually positive semidefinite
# by regularizing a tiny bit
data.flat[::data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)['data']
# Do our projection
proj, _, _ = mne.io.proj.make_projector(
data_cov['projs'], data_cov['names'])
data = proj @ data @ proj.T
data_cov['data'][:] = data
assert data_cov['data'].shape[0] == len(noise_cov['names'])
want = np.arange(fwd['sol']['data'].shape[1])
if not mne.forward.is_fixed_orient(fwd):
want //= 3
return evoked, fwd, noise_cov, data_cov, want
@pytest.fixture
def garbage_collect():
"""Garbage collect on exit."""
yield
gc.collect()
@pytest.fixture(params=["mayavi", "pyvista"])
def renderer(request, garbage_collect):
"""Yield the 3D backends."""
with _use_backend(request.param, interactive=False) as renderer:
yield renderer
@pytest.fixture(params=["pyvista"])
def renderer_pyvista(request, garbage_collect):
"""Yield the PyVista backend."""
with _use_backend(request.param, interactive=False) as renderer:
yield renderer
@pytest.fixture(params=["notebook"])
def renderer_notebook(request):
"""Yield the 3D notebook renderer."""
with _use_backend(request.param, interactive=False) as renderer:
yield renderer
@pytest.fixture(scope="module", params=["pyvista"])
def renderer_interactive_pyvista(request):
"""Yield the interactive PyVista backend."""
with _use_backend(request.param, interactive=True) as renderer:
yield renderer
@pytest.fixture(scope="module", params=["pyvista", "mayavi"])
def renderer_interactive(request):
"""Yield the interactive 3D backends."""
with _use_backend(request.param, interactive=True) as renderer:
if renderer._get_3d_backend() == 'mayavi':
with warnings.catch_warnings(record=True):
try:
from surfer import Brain # noqa: 401 analysis:ignore
except Exception:
pytest.skip('Requires PySurfer')
yield renderer
@contextmanager
def _use_backend(backend_name, interactive):
from mne.viz.backends.renderer import _use_test_3d_backend
_check_skip_backend(backend_name)
with _use_test_3d_backend(backend_name, interactive=interactive):
from mne.viz.backends import renderer
try:
yield renderer
finally:
renderer.backend._close_all()
def _check_skip_backend(name):
from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
has_pyqt5, has_imageio_ffmpeg)
check_pyvista = name in ('pyvista', 'notebook')
check_pyqt5 = name in ('mayavi', 'pyvista')
if name == 'mayavi':
if not has_mayavi():
pytest.skip("Test skipped, requires mayavi.")
elif name == 'pyvista':
if not has_imageio_ffmpeg():
pytest.skip("Test skipped, requires imageio-ffmpeg")
if check_pyvista and not has_pyvista():
pytest.skip("Test skipped, requires pyvista.")
if check_pyqt5 and not has_pyqt5():
pytest.skip("Test skipped, requires PyQt5.")
@pytest.fixture(scope='session')
def pixel_ratio():
"""Get the pixel ratio."""
from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
has_pyqt5)
if not (has_mayavi() or has_pyvista()) or not has_pyqt5():
return 1.
from PyQt5.QtWidgets import QApplication, QMainWindow
_ = QApplication.instance() or QApplication([])
window = QMainWindow()
ratio = float(window.devicePixelRatio())
window.close()
return ratio
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def subjects_dir_tmp(tmpdir):
"""Copy MNE-testing-data subjects_dir to a temp dir for manipulation."""
for key in ('sample', 'fsaverage'):
shutil.copytree(op.join(subjects_dir, key), str(tmpdir.join(key)))
return str(tmpdir)
# Scoping these as session will make things faster, but need to make sure
# not to modify them in-place in the tests, so keep them private
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked_cov_sphere(_evoked):
"""Compute a small evoked/cov/sphere combo for use with forwards."""
evoked = _evoked.copy().pick_types(meg=True)
evoked.pick_channels(evoked.ch_names[::4])
assert len(evoked.ch_names) == 77
cov = mne.read_cov(fname_cov)
sphere = mne.make_sphere_model('auto', 'auto', evoked.info)
return evoked, cov, sphere
@pytest.fixture(scope='session')
def _fwd_surf(_evoked_cov_sphere):
"""Compute a forward for a surface source space."""
evoked, cov, sphere = _evoked_cov_sphere
src_surf = mne.read_source_spaces(fname_src)
return mne.make_forward_solution(
evoked.info, fname_trans, src_surf, sphere, mindist=5.0)
@pytest.fixture(scope='session')
def _fwd_subvolume(_evoked_cov_sphere):
"""Compute a forward for a surface source space."""
pytest.importorskip('nibabel')
evoked, cov, sphere = _evoked_cov_sphere
volume_labels = ['Left-Cerebellum-Cortex', 'right-Cerebellum-Cortex']
with pytest.raises(ValueError,
match=r"Did you mean one of \['Right-Cere"):
mne.setup_volume_source_space(
'sample', pos=20., volume_label=volume_labels,
subjects_dir=subjects_dir)
volume_labels[1] = 'R' + volume_labels[1][1:]
src_vol = mne.setup_volume_source_space(
'sample', pos=20., volume_label=volume_labels,
subjects_dir=subjects_dir, add_interpolator=False)
return mne.make_forward_solution(
evoked.info, fname_trans, src_vol, sphere, mindist=5.0)
@pytest.fixture(scope='session')
def _all_src_types_fwd(_fwd_surf, _fwd_subvolume):
"""Create all three forward types (surf, vol, mixed)."""
fwds = dict(surface=_fwd_surf, volume=_fwd_subvolume)
with pytest.raises(RuntimeError,
match='Invalid source space with kinds'):
fwds['volume']['src'] + fwds['surface']['src']
# mixed (4)
fwd = fwds['surface'].copy()
f2 = fwds['volume']
for keys, axis in [(('source_rr',), 0),
(('source_nn',), 0),
(('sol', 'data'), 1),
(('_orig_sol',), 1)]:
a, b = fwd, f2
key = keys[0]
if len(keys) > 1:
a, b = a[key], b[key]
key = keys[1]
a[key] = np.concatenate([a[key], b[key]], axis=axis)
fwd['sol']['ncol'] = fwd['sol']['data'].shape[1]
fwd['nsource'] = fwd['sol']['ncol'] // 3
fwd['src'] = fwd['src'] + f2['src']
fwds['mixed'] = fwd
return fwds
@pytest.fixture(scope='session')
def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd):
"""Compute inverses for all source types."""
evoked, cov, _ = _evoked_cov_sphere
invs = dict()
for kind, fwd in _all_src_types_fwd.items():
assert fwd['src'].kind == kind
with pytest.warns(RuntimeWarning, match='has magnitude'):
invs[kind] = mne.minimum_norm.make_inverse_operator(
evoked.info, fwd, cov)
return invs, evoked
@pytest.fixture(scope='function')
def all_src_types_inv_evoked(_all_src_types_inv_evoked):
"""All source types of inverses, allowing for possible modification."""
invs, evoked = _all_src_types_inv_evoked
invs = {key: val.copy() for key, val in invs.items()}
evoked = evoked.copy()
return invs, evoked
@pytest.fixture(scope='function')
def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd):
"""Compute inverses for all source types."""
evoked, cov, _ = _evoked_cov_sphere
return _all_src_types_fwd['mixed'].copy(), cov.copy(), evoked.copy()
@pytest.fixture(scope='session')
@pytest.mark.slowtest
@pytest.mark.parametrize(params=[testing._pytest_param()])
def src_volume_labels():
"""Create a 7mm source space with labels."""
pytest.importorskip('nibabel')
volume_labels = mne.get_volume_labels_from_aseg(fname_aseg)
src = mne.setup_volume_source_space(
'sample', 7., mri='aseg.mgz', volume_label=volume_labels,
add_interpolator=False, bem=fname_bem,
subjects_dir=subjects_dir)
lut, _ = mne.read_freesurfer_lut()
assert len(volume_labels) == 46
assert volume_labels[0] == 'Unknown'
assert lut['Unknown'] == 0 # it will be excluded during label gen
return src, tuple(volume_labels), lut
def _fail(*args, **kwargs):
raise AssertionError('Test should not download')
@pytest.fixture(scope='function')
def download_is_error(monkeypatch):
"""Prevent downloading by raising an error when it's attempted."""
monkeypatch.setattr(mne.utils.fetching, '_get_http', _fail)
@pytest.fixture()
def brain_gc(request):
"""Ensure that brain can be properly garbage collected."""
keys = (
'renderer_interactive',
'renderer_interactive_pyvista',
'renderer_interactive_pysurfer',
'renderer',
'renderer_pyvista',
'renderer_notebook',
)
assert set(request.fixturenames) & set(keys) != set()
for key in keys:
if key in request.fixturenames:
is_pv = request.getfixturevalue(key)._get_3d_backend() == 'pyvista'
close_func = request.getfixturevalue(key).backend._close_all
break
if not is_pv:
yield
return
import pyvista
if LooseVersion(pyvista.__version__) <= LooseVersion('0.26.1'):
yield
return
from mne.viz import Brain
ignore = set(id(o) for o in gc.get_objects())
yield
close_func()
# no need to warn if the test itself failed, pytest-harvest helps us here
try:
outcome = request.node.harvest_rep_call
except Exception:
outcome = 'failed'
if outcome != 'passed':
return
_assert_no_instances(Brain, 'after')
# We only check VTK for PyVista -- Mayavi/PySurfer is not as strict
objs = gc.get_objects()
bad = list()
for o in objs:
try:
name = o.__class__.__name__
except Exception: # old Python, probably
pass
else:
if name.startswith('vtk') and id(o) not in ignore:
bad.append(name)
del o
del objs, ignore, Brain
assert len(bad) == 0, 'VTK objects linger:\n' + '\n'.join(bad)
def pytest_sessionfinish(session, exitstatus):
"""Handle the end of the session."""
n = session.config.option.durations
if n is None:
return
print('\n')
try:
import pytest_harvest
except ImportError:
print('Module-level timings require pytest-harvest')
return
from py.io import TerminalWriter
# get the number to print
res = pytest_harvest.get_session_synthesis_dct(session)
files = dict()
for key, val in res.items():
parts = Path(key.split(':')[0]).parts
# split mne/tests/test_whatever.py into separate categories since these
# are essentially submodule-level tests. Keeping just [:3] works,
        # except for mne/viz where we want level-4 granularity
parts = parts[:4 if parts[:2] == ('mne', 'viz') else 3]
if not parts[-1].endswith('.py'):
parts = parts + ('',)
file_key = '/'.join(parts)
files[file_key] = files.get(file_key, 0) + val['pytest_duration_s']
files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
# print
files = files[:n]
if len(files):
writer = TerminalWriter()
writer.line() # newline
writer.sep('=', f'slowest {n} test module{_pl(n)}')
names, timings = zip(*files)
timings = [f'{timing:0.2f}s total' for timing in timings]
rjust = max(len(timing) for timing in timings)
timings = [timing.rjust(rjust) for timing in timings]
for name, timing in zip(names, timings):
writer.line(f'{timing.ljust(15)}{name}')
@pytest.fixture(scope="function", params=('Numba', 'NumPy'))
def numba_conditional(monkeypatch, request):
"""Test both code paths on machines that have Numba."""
assert request.param in ('Numba', 'NumPy')
if request.param == 'NumPy' and has_numba:
monkeypatch.setattr(
cluster_level, '_get_buddies', cluster_level._get_buddies_fallback)
monkeypatch.setattr(
cluster_level, '_get_selves', cluster_level._get_selves_fallback)
monkeypatch.setattr(
cluster_level, '_where_first', cluster_level._where_first_fallback)
monkeypatch.setattr(
numerics, '_arange_div', numerics._arange_div_fallback)
if request.param == 'Numba' and not has_numba:
pytest.skip('Numba not installed')
yield request.param
| bsd-3-clause |
mariusvniekerk/ibis | ibis/expr/tests/test_timestamp.py | 3 | 3624 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import ibis
import ibis.expr.api as api
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
class TestTimestamp(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
self.alltypes = self.con.table('alltypes')
self.col = self.alltypes.i
def test_field_select(self):
assert isinstance(self.col, ir.TimestampArray)
def test_string_cast_to_timestamp(self):
casted = self.alltypes.g.cast('timestamp')
assert isinstance(casted, ir.TimestampArray)
string = api.literal('2000-01-01')
casted = string.cast('timestamp')
assert isinstance(casted, ir.TimestampScalar)
def test_extract_fields(self):
# type-size may be database specific
cases = [
('year', ops.ExtractYear, ir.Int32Array),
('month', ops.ExtractMonth, ir.Int32Array),
('day', ops.ExtractDay, ir.Int32Array),
('hour', ops.ExtractHour, ir.Int32Array),
('minute', ops.ExtractMinute, ir.Int32Array),
('second', ops.ExtractSecond, ir.Int32Array),
('millisecond', ops.ExtractMillisecond, ir.Int32Array),
]
for attr, ex_op, ex_type in cases:
result = getattr(self.col, attr)()
assert result.get_name() == attr
assert isinstance(result, ex_type)
assert isinstance(result.op(), ex_op)
def test_now(self):
result = api.now()
assert isinstance(result, ir.TimestampScalar)
assert isinstance(result.op(), ops.TimestampNow)
def test_timestamp_literals(self):
ts_str = '2015-01-01 00:00:00'
val = pd.Timestamp(ts_str)
expr = ibis.literal(val)
assert isinstance(expr, ir.TimestampScalar)
expr = ibis.timestamp(ts_str)
assert isinstance(expr, ir.TimestampScalar)
self.assertRaises(ValueError, ibis.timestamp, '2015-01-01 00:71')
def test_integer_to_timestamp(self):
# #246
pass
def test_comparison_timestamp(self):
expr = self.col > (self.col.min() + ibis.day(3))
assert isinstance(expr, ir.BooleanArray)
def test_comparisons_string(self):
val = '2015-01-01 00:00:00'
expr = self.col > val
op = expr.op()
assert isinstance(op.right, ir.TimestampScalar)
expr2 = val < self.col
op = expr2.op()
assert isinstance(op, ops.Greater)
assert isinstance(op.right, ir.TimestampScalar)
def test_comparisons_pandas_timestamp(self):
val = pd.Timestamp('2015-01-01 00:00:00')
expr = self.col > val
op = expr.op()
assert isinstance(op.right, ir.TimestampScalar)
# TODO: this is broken for now because of upstream pandas problems
# expr2 = val < self.col
# op = expr2.op()
# assert isinstance(op, ops.Greater)
# assert isinstance(op.right, ir.TimestampScalar)
| apache-2.0 |
eickenberg/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
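# Note: IsotonicRegression fits a non-decreasing step function y_hat that
# minimizes sum_i (y_hat_i - y_i)^2 subject to y_hat_1 <= ... <= y_hat_n
# (solved with the pool-adjacent-violators algorithm), so unlike the linear
# fit it assumes only monotonicity, not a particular functional form.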
###############################################################################
# plot result
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
cysuncn/python | crawler/rent/dataAnalyse/ziroomAnalysis.py | 1 | 13803 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 12:16:17 2017
@author: zhanglu01
"""
import json
import pandas as pd
import matplotlib.pyplot as plot
import ziroomAnalysis.geohash as geohash
def line_json_load(filename):
with open(filename, 'r', encoding='utf-8') as f:
lines = f.readlines()
df = pd.DataFrame()
i = 0
for line in lines:
tmp_df = pd.DataFrame(json.loads(line), index=[i])
tmp_df["price"] = tmp_df["price"].astype("int")
tmp_df["area"] = tmp_df["area"].astype("float")
tmp_df["lng"] = tmp_df["lng"].astype("float")
tmp_df["lat"] = tmp_df["lat"].astype("float")
if tmp_df.iloc[0]["time_unit"] == "每天":
tmp_df.price[i] = tmp_df.price[i]*30
df = df.append(tmp_df)
i += 1
return df
filename = 'F:/PyWorkspace/ziroomAnalysis/0729/ziroomBeijing.json'
df = line_json_load(filename)
df = df.drop_duplicates()
df = df[(df['time_unit']!='每天') & (df['direction']!='南北') & (df['floorLoc']!='') & (df['floorTotal']!='')]
# Summary statistics for each rental type
#df["price_per_m2"] = df["price"]/df["area"]
groups = df.groupby(df["rentType"])
rt_count = groups.size()
rt_mean = groups.mean().rename(columns={'price':'mean_price'})
rt_max = groups.max().rename(columns={'price':'max_price'})
rt_min = groups.min().rename(columns={'price':'min_price'})
rt_median = groups.median().rename(columns={'price':'median_price'})
rentTypeDf = pd.concat([rt_mean["mean_price"],pd.DataFrame(rt_count,columns=["count"]),rt_max["max_price"],rt_min["min_price"],rt_median["median_price"]],axis=1)
#df[df['price']==990]["link"]
############ Shared-room ("合租") rental analysis ############
# Count rooms in 100-yuan monthly-rent bins
he_intervals = {100*x:0 for x in range(64)}
for price in df[df['rentType']=='合']['price']:
he_intervals[price//100*100] += 1
plot.bar(he_intervals.keys(), he_intervals.values(), width=100, alpha = .5, color = 'blue')
plot.xlabel(u"月租(元)", fontproperties='SimHei')
plot.ylabel(u"房间数量", fontproperties='SimHei')
plot.show()
# Geohash-encode each room's lat/lng and merge points falling in the same cell, to cut down the number of points drawn on the heat map
geohash_dict = dict()
for house in df[df['rentType']=='合'].iterrows():
geohash_code = geohash.encode(house[1]["lat"], house[1]["lng"], 6)
if geohash_code in geohash_dict.keys():
geohash_dict[geohash_code] += 1
else:
geohash_dict[geohash_code] = 1
# Paste the value of he_position_str into the corresponding field of "房间数量热力图.html" (the room-count heat map page)
he_position_str = ""
for code in geohash_dict:
he_position_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(geohash.decode_exactly(code)[1],geohash.decode_exactly(code)[0],geohash_dict[code])
# Paste the value of he_position_price_str into the corresponding field of "价格在地图上的分布.html" (the price-distribution map page)
he_position_price_str = ""
for house in df[df['rentType']=='合'].iterrows():
if house[1]["price"]<2000:
he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],5)
elif house[1]["price"]<3000:
he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],10)
else:
he_position_price_str += '{{"lng": {0}, "lat": {1}, "count": {2}}},\n'.format(house[1]["lng"],house[1]["lat"],15)
############################ Geographic clustering ############################
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Used to judge how many clusters are appropriate: the smaller the sum of distances from each cluster's points to its centre, the better the clustering; pick the cluster count at the elbow of the curve
__clfInertia__ = []
for i in range(2,30,1):
clf = KMeans(n_clusters=i)
s = clf.fit(df[(df['rentType']=='合') & (df['price']>=3000)][["lng", "lat"]])
__clfInertia__.append([i, clf.inertia_])
plt.plot([x[0] for x in __clfInertia__], [x[1] for x in __clfInertia__],'b*')
plt.plot([x[0] for x in __clfInertia__], [x[1] for x in __clfInertia__],'r')
# Run KMeans with the chosen number of clusters
clf = KMeans(n_clusters=4)
s = clf.fit(df[(df['rentType']=='合') & (df['price']>=3000)][["lng", "lat"]])
print(s)
# The n cluster centres
print(clf.cluster_centers_)
############################ Random forest regression ############################
from math import radians,sin,cos,degrees,atan2,atan,tan,acos
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
def getDegree(latA, lngA, latB, lngB):
"""
Args:
point p1(latA, lngA)
point p2(latB, lngB)
Returns:
bearing between the two GPS points,
default: the basis of heading direction is north
"""
radLatA = radians(latA)
radLngA = radians(lngA)
radLatB = radians(latB)
radLngB = radians(lngB)
dLng = radLngB - radLngA
y = sin(dLng) * cos(radLatB)
x = cos(radLatA) * sin(radLatB) - sin(radLatA) * cos(radLatB) * cos(dLng)
brng = degrees(atan2(y, x))
brng = (brng + 360) % 360
return brng
def getDistance(latA, lngA, latB, lngB):
ra = 6378140 # radius of equator: meter
rb = 6356755 # radius of polar: meter
flatten = (ra - rb) / ra # Partial rate of the earth
# change angle to radians
radLatA = radians(latA)
radLngA = radians(lngA)
radLatB = radians(latB)
radLngB = radians(lngB)
pA = atan(rb / ra * tan(radLatA))
pB = atan(rb / ra * tan(radLatB))
x = acos(sin(pA) * sin(pB) + cos(pA) * cos(pB) * cos(radLngA - radLngB))
c1 = (sin(x) - x) * (sin(pA) + sin(pB))**2 / cos(x / 2)**2
c2 = (sin(x) + x) * (sin(pA) - sin(pB))**2 / sin(x / 2)**2
dr = flatten / 8 * (c1 - c2)
distance = ra * (x + dr)
return distance
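# Illustrative usage sketch (the second point below is an arbitrary location a
# few kilometres north-west of the first, not a real landmark):
# bearing_deg = getDegree(39.915129, 116.403981, 39.95, 116.35)   # clockwise from north, degrees
# distance_m = getDistance(39.915129, 116.403981, 39.95, 116.35)  # metres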
df['degree'] = df.apply(lambda row: getDegree(39.915129,116.403981,row.lat,row.lng), axis=1)
df['distance'] = df.apply(lambda row: getDistance(39.915129,116.403981,row.lat,row.lng), axis=1)
#df['distance1'] = df.apply(lambda row: getDistance(39.93573198,116.33882039,row.lat,row.lng), axis=1)
#df['distance2'] = df.apply(lambda row: getDistance(39.9934964,116.45926247,row.lat,row.lng), axis=1)
#df['distance3'] = df.apply(lambda row: getDistance(39.91515228,116.4790283,row.lat,row.lng), axis=1)
#df['distance4'] = df.apply(lambda row: getDistance(40.04388111,116.35319092,row.lat,row.lng), axis=1)
#df['distance5'] = df.apply(lambda row: getDistance(39.929654,116.403119,row.lat,row.lng), axis=1)
rf_data = df[(df.rentType=='合') & (df.time_unit!='每天') & (df.floorLoc!='') & (df.floorTotal!='')][['area','confGen','confType','direction','floorLoc','floorTotal','nearestSubWayDist','privateBalcony','privateBathroom','rooms','halls','district','degree','distance','link','price']]
rf_data = rf_data.reset_index(drop=True) # reset the index
confGenLe = LabelEncoder()
rf_data['confGen']=confGenLe.fit_transform(rf_data['confGen'])
list(confGenLe.classes_)
confTypeLe = LabelEncoder()
rf_data['confType']=confTypeLe.fit_transform(rf_data['confType'])
list(confTypeLe.classes_)
directionLe = LabelEncoder()
rf_data['direction']=directionLe.fit_transform(rf_data['direction'])
list(directionLe.classes_)
districtLe = LabelEncoder()
rf_data['district']=districtLe.fit_transform(rf_data['district'])
list(districtLe.classes_)
rf_data.nearestSubWayDist = rf_data.nearestSubWayDist.replace('','5000')
#one-hot encoding
def one_hot_encode(label_set,data):
oneHotEnc = OneHotEncoder()
oneHotEnc.fit(label_set)
result=oneHotEnc.transform(data).toarray()
return result
oneHotEncodes = one_hot_encode(
[[0,0,0,0],[1,1,1,1],[2,2,2,2],[3,3,3,3],[0,4,4,4],[0,5,5,5],[0,0,6,6],[0,0,7,7],[0,0,0,8],[0,0,0,9],[0,0,0,10],[0,0,0,11],[0,0,0,12]],
rf_data[['confGen','confType','direction','district']])
# Convert the 2D array of one-hot codes into DataFrame columns
one_hot_columns = ["confGen0", "confGen1", "confGen2", "confGen3",
"confType0", "confType1", "confType2", "confType3", "confType4", "confType5",
"direction0", "direction1", "direction2", "direction3", "direction4", "direction5", "direction6", "direction7",
"district0", "district1", "district2", "district3", "district4", "district5", "district6", "district7", "district8", "district9", "district10", "district11", "district12"]
rf_data[one_hot_columns] = pd.DataFrame(oneHotEncodes,columns=one_hot_columns)
rf_data=rf_data.drop(['confGen','confType','direction','district'],axis=1)
tmp_link=rf_data[['link','price']]
rf_data=rf_data.drop(['link','price'],axis=1)
rf_data[['link','price']]=tmp_link
X_train, X_test, y_train, y_test = train_test_split(rf_data.iloc[:,0:42], rf_data.iloc[:,[42]], test_size=0.33, random_state=42)
# Model training: start
# First, grid-search n_estimators
param_test1= {'n_estimators':list(range(450,550,10))}
gsearch1= GridSearchCV(estimator = RandomForestRegressor(max_features="log2", min_samples_leaf=2, oob_score=True), param_grid =param_test1, scoring=None, cv=5)
gsearch1.fit(X_train.iloc[:,0:18],y_train)
gsearch1.grid_scores_,gsearch1.best_params_, gsearch1.best_score_
# Next, grid-search the maximum tree depth (max_depth) and the minimum samples needed to split an internal node (min_samples_split).
param_test2= {'max_depth':list(range(80,100,2)), 'min_samples_split':list(range(2,101,2))}
gsearch2= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50, max_features="log2", min_samples_leaf=2, oob_score=True), param_grid = param_test2,scoring=None,iid=False, cv=5)
gsearch2.fit(X_train.iloc[:,0:18],y_train)
gsearch2.grid_scores_,gsearch2.best_params_, gsearch2.best_score_
# Then tune min_samples_split together with the minimum samples per leaf (min_samples_leaf)
param_test3= {'min_samples_split':list(range(2,10,2)), 'min_samples_leaf':list(range(2,20,2))}
gsearch3= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50, max_features="log2",max_depth=96, oob_score=True), param_grid = param_test3,scoring=None,iid=False, cv=5)
gsearch3.fit(X_train.iloc[:,0:18],y_train)
gsearch3.grid_scores_,gsearch3.best_params_, gsearch3.best_score_
# Finally, tune the maximum number of features (max_features):
param_test4= {'max_features':list(range(2,17,1))}
gsearch4= GridSearchCV(estimator = RandomForestRegressor(n_estimators=50,max_depth=96,min_samples_split=4,min_samples_leaf=2, oob_score=True), param_grid = param_test4,scoring=None,iid=False, cv=5)
gsearch4.fit(X_train.iloc[:,0:18],y_train)
gsearch4.grid_scores_,gsearch4.best_params_, gsearch4.best_score_
rf_classifier = RandomForestRegressor(n_estimators=540,max_features=12,max_depth=96,min_samples_split=4,min_samples_leaf=2, oob_score=True)
rf_classifier.fit(X_train.iloc[:,0:41],y_train)
rf_classifier.oob_score_ # out-of-bag score
pd.Series(rf_classifier.feature_importances_,index=X_train.columns[0:41]).sort_values(ascending=False) # feature importances, sorted descending
# Model training: end
# Model prediction: start
results = rf_classifier.predict(X_test.iloc[:,0:41]).astype(int)
rf_classifier.score(X_test.iloc[:,0:41],y_test) # model accuracy (R^2)
pddf = pd.DataFrame({'actual':y_test.price,'predict':results,'link':X_test.link,'size':X_test.area})
pddf['diff'] = abs(pddf.predict-pddf.actual)/pddf.actual
pddf_ordered = pddf.sort(columns='diff', ascending=False)
# Model prediction: end
############################# Grey relational analysis #############################
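# Summary of the grey relational analysis steps below:
# 1) min-max normalize each indicator (area, price, distance to work) to [0, 1];
# 2) take the absolute difference from the ideal reference sequence
#    (area -> 1 is best, price -> 0 is best, distance -> 0 is best);
# 3) relational coefficient = (min_diff + 0.5*max_diff) / (diff + 0.5*max_diff),
#    i.e. the usual formula with resolution coefficient rho = 0.5;
# 4) rank rooms by a weighted sum of the three coefficients (weights 1/6, 1/3, 1/2).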
he_df = df[(df['rentType']=='合') & (df.time_unit!='每天') & (df.area>8) & (df.price<2200)] # drop listings outside my own size/price expectations
he_df['co_distance'] = he_df.apply(lambda row: getDistance(39.988122,116.319725,row.lat,row.lng), axis=1) # distance from each room to the office
# Normalize the indicators to [0, 1] (min-max scaling)
he_feature_max = he_df[['area','price','co_distance']].max()
he_feature_min = he_df[['area','price','co_distance']].min()
he_df['area_nondim'] = he_df.apply(lambda row: (row.area-he_feature_min.area)/(he_feature_max.area-he_feature_min.area), axis=1)
he_df['price_nondim'] = he_df.apply(lambda row: (row.price-he_feature_min.price)/(he_feature_max.price-he_feature_min.price), axis=1)
he_df['co_distance_nondim'] = he_df.apply(lambda row: (row.co_distance-he_feature_min.co_distance)/(he_feature_max.co_distance-he_feature_min.co_distance), axis=1)
# Compute the grey relational coefficients
opt_series = pd.Series([1,0,0], index=['area_nondim','price_nondim','co_distance_nondim']) # the ideal (reference) sequence
he_df['area_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.area_nondim-opt_series.area_nondim), axis=1)
he_df['price_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.price_nondim-opt_series.price_nondim), axis=1)
he_df['co_distance_nondim_opt_diff'] = he_df.apply(lambda row: abs(row.co_distance_nondim-opt_series.co_distance_nondim), axis=1)
min_nondim_opt_diff = min(min(he_df['area_nondim_opt_diff']),min(he_df['price_nondim_opt_diff']),min(he_df['co_distance_nondim_opt_diff']))
max_nondim_opt_diff = max(max(he_df['area_nondim_opt_diff']),max(he_df['price_nondim_opt_diff']),max(he_df['co_distance_nondim_opt_diff']))
he_df['area_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.area_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['price_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.price_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['co_distance_cor'] = he_df.apply(lambda row: (min_nondim_opt_diff+0.5*max_nondim_opt_diff)/(row.co_distance_nondim_opt_diff+0.5*max_nondim_opt_diff), axis=1)
he_df['room_cor_order'] = he_df['area_cor']/6+he_df['price_cor']/3+he_df['co_distance_cor']/2
he_ordered_df = he_df.sort(columns='room_cor_order', ascending=False) # rank rooms by grey relational grade, descending | gpl-3.0 |
castlest/shell-detection | coherence-elliptical-kernel/main.py | 1 | 7932 | '''
Author: S.T. Castle
Created: 2015-03-15
'''
#import math
import numpy as np
from scipy import ndimage
from scipy import stats
import scipy.ndimage.filters
import scipy.linalg
#import skimage.feature
import cv2
from matplotlib import pyplot as plt
def main():
'''
Run the explicit coherence enhancing filter with spatial adaptive
elliptical kernel from F.Li et al. 2012.
'''
# Params.
window_size = 7
sigma = 1 # Standard deviation of initial Gaussian kernel.
rho = 6 # Std dev of Gaussian kernel used to compute structure tensor.
gamma = 0.05
eps = np.spacing(1) # Very small positive number.
filename = 'fingerprint1.png'
# Open as grayscale image.
orig_img = cv2.imread(filename, 0)
print 'Opened ' + filename
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
# Convolve image with a Gaussian kernel with standard deviation sigma.
img = scipy.ndimage.filters.gaussian_filter(orig_img, sigma)
#plt.subplot(111),plt.imshow(img, cmap = 'gray')
#plt.title('Input image'), plt.xticks([]), plt.yticks([])
#plt.show()
print 'shape of img:',
print img.shape
# Compute the 2D structure tensor of the image.
# The structure tensor is:
# [j11 j12]
# [j12 j22]
#j11, j12, j22 = skimage.feature.structure_tensor(img, sigma=sigma)
j11, j12, j22 = structure_tensor(img, sigma=sigma)
#print 'j11'
#print j11
#print 'j12'
#print j12
#print 'j22'
#print j22
print 'shape of j11:',
print j11.shape
print 'shape of J:',
print np.array([[j11,j12],[j12,j22]]).shape
# Compute eigenvalues mu1, mu2 of structure tensor. mu1 >= mu2.
mu1 = (j11 + j22) / 2 + np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
mu2 = (j11 + j22) / 2 - np.sqrt(4 * j12 ** 2 + (j11 - j22) ** 2) / 2
print 'shape of mu1:',
print mu1.shape
# Compute corresponding normalized eigenvectors v1, v2.
v1 = np.asarray([ 2*j12,
j22-j11 + np.sqrt((j11-j22)**2 + 4*(j12**2)) ])
# Rearrange axis so that v1 is indexed as (x,y,(eigvector))
v1 = np.rollaxis(v1,0,3)
#print 'mu1'
#print mu1
#print 'mu2'
#print mu2
#print 'v1'
#print v1
#print 'v2'
#print v2
print 'shape of v1:',
print v1.shape
#print 'v1[0] =',
#print v1[0]
#print 'v1[0][0] =',
#print v1[0][0]
#print v1
# Compute theta based on the angle of v1 and the positive direction of
# the horizontal axis.
# cos(theta) = x / magnitude.
# If the magnitude is 0, then just try setting theta=0 for now.
print 'Calculating theta...'
theta = np.empty((v1.shape[0], v1.shape[1]))
for i in xrange(v1.shape[0]):
for j in xrange(v1.shape[1]):
v = v1[i][j]
mag = float(magnitude(v))
if mag:
theta[i][j] = np.arccos(v[0]/magnitude(v))
else:
theta[i][j] = 0
print 'Done.'
print 'shape of theta:',
print theta.shape
# Now that necessary values are calculated, proceed to filtering.
print 'Filtering...'
fimg = np.empty_like(img) # Create a blank array for the filtered image.
rad = window_size/2 # Radius of the filtering window.
sig1 = 10*gamma
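    # Note: sig1 stays small and fixed (10*gamma), while sig2 (computed per
    # pixel inside the loop) approaches 10 wherever the eigenvalue gap
    # mu1 - mu2 is large, i.e. where the local structure is strongly oriented.
    # Since (s, t) below are coordinates rotated by theta (s roughly along the
    # dominant eigenvector, t perpendicular to it), the Gaussian weight is then
    # elongated along the coherent structure, so smoothing acts mostly along
    # edges/ridges rather than across them.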
# Current pixel is (x1,x2) and neighbor is (y1,y2).
height = img.shape[0]
width = img.shape[1]
for x1 in xrange(height):
for x2 in xrange(width):
eig1 = mu1[x1][x2]
eig2 = mu2[x1][x2]
ang = theta[x1][x2]
sig2 = 10*(gamma+(1-gamma)*np.exp(-1/((eig1-eig2)**2+eps)))
wt_const = 1/(2*np.pi*sig1*sig2) # Constant factor for weighting.
# Add weighted value from neighbor pixel y.
sum = 0
wt_sum = 0 # Sum of the weights for normalization scaling.
for i in xrange(-rad,rad+1):
y1 = x1+i
if (y1 < 0) or (y1 >= height):
continue
for j in xrange(-rad,rad+1):
                    y2 = x2+j
if (y2 < 0) or (y2 >= width):
continue
# Calculate weight of neighboring position y.
s = (y1-x1)*np.cos(ang) + (y2-x2)*np.sin(ang)
t = -(y1-x1)*np.sin(ang) + (y2-x2)*np.cos(ang)
wt = wt_const * np.exp( -s**2/(2*sig1**2) - t**2/(2*sig2**2) )
sum = sum + wt*orig_img[y1][y2] # Use original image or blurred?
wt_sum = wt_sum + wt
# Set value of this pixel x.
#sum = sum * (1.0/wt_sum) # Scale the pixel value.
fimg[x1][x2] = sum
print x1
print 'Done.'
# Display original and filtered images.
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(fimg, cmap = 'gray')
plt.title('Filtered Image'), plt.xticks([]), plt.yticks([])
plt.show()
def magnitude(v):
"""Magnitude of a vector."""
return np.sqrt(np.dot(v, v))
# from skimage !!!!
def _compute_derivatives(image, mode='constant', cval=0):
"""Compute derivatives in x and y direction using the Sobel operator.
Parameters
----------
image : ndarray
Input image.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
imx : ndarray
Derivative in x-direction.
imy : ndarray
Derivative in y-direction.
"""
imy = ndimage.sobel(image, axis=0, mode=mode, cval=cval)
imx = ndimage.sobel(image, axis=1, mode=mode, cval=cval)
return imx, imy
def structure_tensor(image, sigma=1, mode='constant', cval=0):
"""Compute structure tensor using sum of squared differences.
The structure tensor A is defined as::
A = [Axx Axy]
[Axy Ayy]
which is approximated by the weighted sum of squared differences in a local
window around each pixel in the image.
Parameters
----------
image : ndarray
Input image.
sigma : float
Standard deviation used for the Gaussian kernel, which is used as a
weighting function for the local summation of squared differences.
mode : {'constant', 'reflect', 'wrap', 'nearest', 'mirror'}, optional
How to handle values outside the image borders.
cval : float, optional
Used in conjunction with mode 'constant', the value outside
the image boundaries.
Returns
-------
Axx : ndarray
Element of the structure tensor for each pixel in the input image.
Axy : ndarray
Element of the structure tensor for each pixel in the input image.
Ayy : ndarray
Element of the structure tensor for each pixel in the input image.
Examples
--------
>>> from skimage.feature import structure_tensor
>>> square = np.zeros((5, 5))
>>> square[2, 2] = 1
>>> Axx, Axy, Ayy = structure_tensor(square, sigma=0.1)
>>> Axx
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 4., 0., 4., 0.],
[ 0., 1., 0., 1., 0.],
[ 0., 0., 0., 0., 0.]])
"""
#image = _prepare_grayscale_input_2D(image)
imx, imy = _compute_derivatives(image, mode=mode, cval=cval)
    # structure tensor
Axx = ndimage.gaussian_filter(imx * imx, sigma, mode=mode, cval=cval)
Axy = ndimage.gaussian_filter(imx * imy, sigma, mode=mode, cval=cval)
Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode=mode, cval=cval)
return Axx, Axy, Ayy
if __name__ == '__main__':
main()
| bsd-3-clause |
Dioptas/pymatgen | pymatgen/analysis/diffraction/xrd.py | 2 | 14721 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements an XRD pattern calculator.
"""
from six.moves import filter
from six.moves import map
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "5/22/14"
from math import sin, cos, asin, pi, degrees, radians
import os
import numpy as np
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
#XRD wavelengths in angstroms
WAVELENGTHS = {
"CuKa": 1.54184,
"CuKa2": 1.54439,
"CuKa1": 1.54056,
"CuKb1": 1.39222,
"MoKa": 0.71073,
"MoKa2": 0.71359,
"MoKa1": 0.70930,
"MoKb1": 0.63229,
"CrKa": 2.29100,
"CrKa2": 2.29361,
"CrKa1": 2.28970,
"CrKb1": 2.08487,
"FeKa": 1.93735,
"FeKa2": 1.93998,
"FeKa1": 1.93604,
"FeKb1": 1.75661,
"CoKa": 1.79026,
"CoKa2": 1.79285,
"CoKa1": 1.78896,
"CoKb1": 1.63079,
"AgKa": 0.560885,
"AgKa2": 0.563813,
"AgKa1": 0.559421,
"AgKb1": 0.497082,
}
with open(os.path.join(os.path.dirname(__file__),
"atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(object):
"""
Computes the XRD pattern of a crystal structure.
This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
Crystallography of Materials. The formalism for this code is based on
that given in Chapters 11 and 12 of Structure of Materials by Marc De
Graef and Michael E. McHenry. This takes into account the atomic
scattering factors and the Lorentz polarization factor, but not
the Debye-Waller (temperature) factor (for which data is typically not
available). Note that the multiplicity correction is not needed since
this code simply goes through all reciprocal points within the limiting
sphere, which includes all symmetrically equivalent planes. The algorithm
is as follows
1. Calculate reciprocal lattice of structure. Find all reciprocal points
within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
lattice plane :math:`(hkl)`, compute the Bragg condition
:math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
3. Compute the structure factor as the sum of the atomic scattering
factors. The atomic scattering factors are given by
.. math::
f(s) = Z - 41.78214 \\times s^2 \\times \\sum\\limits_{i=1}^n a_i \
\exp(-b_is^2)
where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
and :math:`b_i` are the fitted parameters for each element. The
structure factor is then given by
.. math::
F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i \\mathbf{g_{hkl}}
\cdot \\mathbf{r})
4. The intensity is then given by the modulus square of the structure
factor.
.. math::
I_{hkl} = F_{hkl}F_{hkl}^*
5. Finally, the Lorentz polarization correction factor is applied. This
factor is given by:
.. math::
P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
{\\sin^2(\\theta)\\cos(\\theta)}
"""
#Tuple of available radiation keywords.
AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())
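    # Rough usage sketch (illustrative, not part of the original class);
    # ``structure`` is assumed to be a pymatgen Structure:
    #
    #     calc = XRDCalculator(wavelength="CuKa")
    #     for two_theta, intensity, hkls, d_hkl in calc.get_xrd_data(structure):
    #         print(two_theta, intensity, hkls, d_hkl)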
#Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the peak intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
def __init__(self, wavelength="CuKa", symprec=0, debye_waller_factors=None):
"""
Initializes the XRD calculator with a given radiation.
Args:
wavelength (str/float): The wavelength can be specified as either a
float or a string. If it is a string, it must be one of the
supported definitions in the AVAILABLE_RADIATION class
variable, which provides useful commonly used wavelengths.
If it is a float, it is interpreted as a wavelength in
angstroms. Defaults to "CuKa", i.e, Cu K_alpha radiation.
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
if isinstance(wavelength, float):
self.wavelength = wavelength
else:
self.radiation = wavelength
self.wavelength = WAVELENGTHS[wavelength]
self.symprec = symprec
self.debye_waller_factors = debye_waller_factors or {}
def get_xrd_data(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the XRD data for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(XRD pattern) in the form of
[[two_theta, intensity, {(h, k, l): mult}, d_hkl], ...]
Two_theta is in degrees. Intensity is in arbitrary units and if
scaled (the default), has a maximum value of 100 for the highest
peak. {(h, k, l): mult} is a dict of Miller indices for all
diffracted lattice planes contributing to that intensity and
their multiplicities. d_hkl is the interplanar spacing.
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
wavelength = self.wavelength
latt = structure.lattice
is_hex = latt.is_hexagonal()
# Obtained from Bragg condition. Note that reciprocal lattice
# vector length is 1 / d_hkl.
min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
[2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
# Obtain crystallographic reciprocal lattice points within range
recip_latt = latt.reciprocal_lattice_crystallographic
recip_pts = recip_latt.get_points_in_sphere(
[[0, 0, 0]], [0, 0, 0], max_r)
if min_r:
recip_pts = filter(lambda d: d[1] >= min_r, recip_pts)
# Create a flattened array of zs, coeffs, fcoords and occus. This is
# used to perform vectorized computation of atomic scattering factors
# later. Note that these are not necessarily the same size as the
# structure as each partially occupied specie occupies its own
# position in the flattened array.
zs = []
coeffs = []
fcoords = []
occus = []
dwfactors = []
for site in structure:
for sp, occu in site.species_and_occu.items():
zs.append(sp.Z)
try:
c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
except KeyError:
raise ValueError("Unable to calculate XRD pattern as "
"there is no scattering coefficients for"
" %s." % sp.symbol)
coeffs.append(c)
dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
fcoords.append(site.frac_coords)
occus.append(occu)
zs = np.array(zs)
coeffs = np.array(coeffs)
fcoords = np.array(fcoords)
occus = np.array(occus)
dwfactors = np.array(dwfactors)
peaks = {}
two_thetas = []
for hkl, g_hkl, ind in sorted(
recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
if g_hkl != 0:
d_hkl = 1 / g_hkl
# Bragg condition
theta = asin(wavelength * g_hkl / 2)
# s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d =
# 1/|ghkl|)
s = g_hkl / 2
#Store s^2 since we are using it a few times.
s2 = s ** 2
# Vectorized computation of g.r for all fractional coords and
# hkl.
g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
# Highly vectorized computation of atomic scattering factors.
# Equivalent non-vectorized code is::
#
# for site in structure:
# el = site.specie
# coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
# fs = el.Z - 41.78214 * s2 * sum(
# [d[0] * exp(-d[1] * s2) for d in coeff])
fs = zs - 41.78214 * s2 * np.sum(
coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)
dw_correction = np.exp(-dwfactors * s2)
# Structure factor = sum of atomic scattering factors (with
# position factor exp(2j * pi * g.r and occupancies).
# Vectorized computation.
f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
* dw_correction)
#Lorentz polarization correction for hkl
lorentz_factor = (1 + cos(2 * theta) ** 2) / \
(sin(theta) ** 2 * cos(theta))
# Intensity for hkl is modulus square of structure factor.
i_hkl = (f_hkl * f_hkl.conjugate()).real
two_theta = degrees(2 * theta)
if is_hex:
#Use Miller-Bravais indices for hexagonal lattices.
hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])
#Deal with floating point precision issues.
ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
XRDCalculator.TWO_THETA_TOL)
if len(ind[0]) > 0:
                    peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
                    peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
else:
peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
d_hkl]
two_thetas.append(two_theta)
# Scale intensities so that the max intensity is 100.
max_intensity = max([v[0] for v in peaks.values()])
data = []
for k in sorted(peaks.keys()):
v = peaks[k]
scaled_intensity = v[0] / max_intensity * 100 if scaled else v[0]
fam = get_unique_families(v[1])
if scaled_intensity > XRDCalculator.SCALED_INTENSITY_TOL:
data.append([k, scaled_intensity, fam, v[2]])
return data
def get_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Returns the XRD plot as a matplotlib.pyplot.
Args:
structure: Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks: Whether to annotate the peaks with plane
information.
Returns:
(matplotlib.pyplot)
"""
from pymatgen.util.plotting_utils import get_publication_quality_plot
plt = get_publication_quality_plot(16, 10)
for two_theta, i, hkls, d_hkl in self.get_xrd_data(
structure, two_theta_range=two_theta_range):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
label = ", ".join([str(hkl) for hkl in hkls.keys()])
plt.plot([two_theta, two_theta], [0, i], color='k',
linewidth=3, label=label)
if annotate_peaks:
plt.annotate(label, xy=[two_theta, i],
xytext=[two_theta, i], fontsize=16)
plt.xlabel(r"2\theta (degrees)")
plt.ylabel("Intensities (scaled)")
plt.tight_layout()
return plt
def show_xrd_plot(self, structure, two_theta_range=(0, 90),
annotate_peaks=True):
"""
Shows the XRD plot.
Args:
structure (Structure): Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
"""
self.get_xrd_plot(structure, two_theta_range=two_theta_range,
annotate_peaks=annotate_peaks).show()
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
#TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = map(abs, hkl1)
h2 = map(abs, hkl2)
return all([i == j for i, j in zip(sorted(h1), sorted(h2))])
unique = {}
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2] += 1
break
if not found:
unique[hkl1] = 1
return unique
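# Example (sketch): Miller indices that are permutations of each other, up to
# sign, collapse into a single family, e.g.
#   get_unique_families([(1, 0, 0), (0, 1, 0), (0, 0, -1)]) -> {(1, 0, 0): 3}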
| mit |
jhsenjaliya/incubator-airflow | airflow/hooks/dbapi_hook.py | 14 | 9338 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object
"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_uri(self):
conn = self.get_connection(getattr(self, self.conn_name_attr))
login = ''
if conn.login:
login = '{conn.login}:{conn.password}@'.format(conn=conn)
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
return '{conn.conn_type}://{login}{host}/{conn.schema}'.format(
conn=conn, login=login, host=host)
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None):
"""
Executes the sql and returns a pandas dataframe
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
import pandas.io.sql as psql
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:type autocommit: bool
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if isinstance(sql, basestring):
sql = [sql]
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
for s in sql:
if sys.version_info[0] < 3:
s = s.encode('utf-8')
self.log.info(s)
if parameters is not None:
cur.execute(s, parameters)
else:
cur.execute(s)
conn.commit()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
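        # The statement built below has the shape (sketch):
        #   INSERT INTO <table> (<col1>, <col2>) VALUES (%s, %s);
        # executed once per row with bound parameters and committed every
        # ``commit_every`` rows.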
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
l = []
for cell in row:
l.append(self._serialize_cell(cell, conn))
values = tuple(l)
placeholders = ["%s",]*len(values)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
                        self.log.info(
                            "Loaded {i} rows into {table} so far".format(**locals())
                        )
conn.commit()
self.log.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file
:param table: The name of the source table
:type table: str
:param tmp_file: The path of the target file
:type tmp_file: str
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
:param table: The name of the target table
:type table: str
:param tmp_file: The path of the file to load into the table
:type tmp_file: str
"""
raise NotImplementedError()
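# Minimal sketch of a concrete subclass (illustrative only; the names below
# are hypothetical and not part of Airflow):
#
#     class MyDbHook(DbApiHook):
#         conn_name_attr = 'my_db_conn_id'
#         default_conn_name = 'my_db_default'
#         supports_autocommit = True
#         connector = my_dbapi2_driver  # any PEP 249 compliant module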
| apache-2.0 |
nhuntwalker/astroML | examples/datasets/plot_sdss_galaxy_colors.py | 3 | 1300 | """
SDSS Galaxy Colors
------------------
The function :func:`fetch_sdss_galaxy_colors` used below actually queries
the SDSS CASjobs server for the colors of the 50,000 galaxies. Below we
extract the :math:`u - g` and :math:`g - r` colors for 5000 stars, and
scatter-plot the results
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from astroML.datasets import fetch_sdss_galaxy_colors
#------------------------------------------------------------
# Download data
data = fetch_sdss_galaxy_colors()
data = data[::10] # truncate for plotting
# Extract colors and spectral class
ug = data['u'] - data['g']
gr = data['g'] - data['r']
spec_class = data['specClass']
stars = (spec_class == 2)
qsos = (spec_class == 3)
#------------------------------------------------------------
# Prepare plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(-0.5, 2.5)
ax.set_ylim(-0.5, 1.5)
ax.plot(ug[stars], gr[stars], '.', ms=4, c='b', label='stars')
ax.plot(ug[qsos], gr[qsos], '.', ms=4, c='r', label='qsos')
ax.legend(loc=2)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
plt.show()
| bsd-2-clause |
hugobowne/playing_with_twitter | sql_test.py | 1 | 3384 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 6 16:52:06 2015
@author: hugobowne-anderson
"""
#http://zetcode.com/db/sqlitepythontutorial/
import sqlite3 as lite
import sys
import json
import unicodedata
###########MAKE LIST OF TWEETS:################
##set deirectory here:
tweets_data_path = '/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets/some_stream_pol.txt'
tweets_data = []
tweets_file = open(tweets_data_path, "r")
for line in tweets_file:
try:
tweet = json.loads(line)
tweets_data.append(tweet)
except:
continue
print len(tweets_data)
tweets_data.pop(0)
tweets_data[0].keys()
tweets_data[0]['place']
print type(tweets_data[0]['place'])
for i in tweets_data:
try:
print i['place']['full_name']
except:
print "Shiiiiiiiite: something missing in data"
############################################
####SQLITE#EG#########
con = lite.connect('test.db') ##connect to db
cur = con.cursor() #get cursor objet to traverse records from result set
cur.execute('SELECT SQLITE_VERSION()') ##execute method of cursor to execute an SQL statement/query
data = cur.fetchone() ##fetch data (one record for the time being)
print "SQLite version: %s" % data
con.close()
############################################
###add tweets to db
con = lite.connect('/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets//test.db') ##connect to db
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS Tweets")
cur.execute("CREATE TABLE Tweets(Name TEXT, Tweet TEXT, Language TEXT, Created_at TEXT, Geo TEXT, Place TEXT)")
for i in tweets_data:
#aaaaaaand do check this out: http://sebastianraschka.com/Articles/2014_sqlite_in_python_tutorial.html
#print i['text']
try:
user = i['user']['name'];
except KeyError:
user = None;
try:
place = i['place']['full_name'];
except:
place = None;
try:
cur.execute("INSERT INTO Tweets VALUES(? , ? , ? , ? , ? , ?)",
(user, i['text'] , i['lang'] , i['timestamp_ms'] , i['geo'] , place));
except:
print "Shiiiiiiiite: something missing in data"
con.commit() #commit changes to db
con.close() #close connection
# i.user()  # leftover: tweets are dicts, the user info lives under i['user']
#####test & select################
#cur.execute("SELECT * FROM Tweets");
con = lite.connect('/Users/hugobowne-anderson/repos/selenium-auto-posting/listening_to_tweets//test.db') ##connect to db
cur = con.cursor()
#cur.execute("SELECT Language FROM Tweets");
#rows = cur.fetchall()
#for row in rows:
# print row
cur.execute("SELECT place, count(place) from Tweets group by place");
rows = cur.fetchall()
for row in rows:
print row
cur.execute("SELECT Language, count(Language) from Tweets group by Language");
rows = cur.fetchall()
type(rows)
#plot(rows)
x = [];
y = [];
for row in rows:
x.append(row[0]);
y.append(row[1]);
import numpy as np
import matplotlib.pyplot as plt
#http://matplotlib.org/api/pyplot_api.html
N = len(x)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
plt.bar(ind, y )
plt.xticks(ind + width/2., x , rotation=90)
plt.ylabel('Number of tweets')
plt.xlabel('Language')
#####################################
########################################################
| gpl-2.0 |
liberatorqjw/scikit-learn | sklearn/neighbors/regression.py | 39 | 10464 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
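        # The prediction is an average over the k nearest neighbors:
        #   uniform weights:  y_pred = mean(y_i)
        #   distance weights: y_pred = sum(w_i * y_i) / sum(w_i), with
        #   w_i = 1 / d_i (up to the handling of zero distances).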
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array or matrix, shape = [n_samples, n_features]
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
amlyj/pythonStudy | 2.7/data_analysis/study_numpy/numpy_ndarray.py | 1 | 5118 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17-7-22 12:41 AM
# @Author : tom.lee
# @docs : http://old.sebug.net/paper/books/scipydoc/numpy_intro.html
# @File : study_numpy.py
# @Software: PyCharm
"""
numpy
    NumPy is a scientific computing library for Python that provides matrix operations; it is usually used together with SciPy and matplotlib.
    NumPy provides two basic kinds of objects:
        ndarray (N-dimensional array object): a multi-dimensional array that stores elements of a single data type;
        ufunc (universal function object): functions that can operate on arrays element-wise.
"""
import numpy as np
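# Quick illustration of the two core objects described above (sketch):
#   arr = np.arange(3)   # ndarray: a typed, N-dimensional array
#   np.add(arr, 1)       # ufunc: applied element-wise -> array([1, 2, 3])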
def split_line():
print '*' * 6 ** 2
def np_version():
"""
    Version
:return:
"""
print np.version.version
def np_list():
"""
    numpy array:
    can only store a single data type,
    is created with "numpy.array()",
    and the type can be specified explicitly with "dtype = numpy.<type>"
:return:
"""
    # creation
l = np.array([1, 2, 3], dtype=np.int8)
a = np.array([1, 2, 3, 4])
b = np.array((5, 6, 7, 8))
c = np.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]])
print 'l:', l
print 'a:', a
print 'b:', b
print 'c:', c
split_line()
    # dtype
    print l.dtype, c.dtype
    split_line()
    # shape: array a's shape has only one element, so a is a one-dimensional array,
    # while c's shape has two elements, so c is two-dimensional with axis 0 of length 3 and axis 1 of length 4
    print l.shape, c.shape
    split_line()
    # Changing the length of each axis only changes the axis sizes; the elements' positions in memory do not change
c.shape = 4, 3
print c
split_line()
    # When an axis is given as -1, its length is computed automatically from the total number of elements, so the statement below changes c's shape to (2, 6)
c.shape = 2, -1
print c
split_line()
    # The reshape method creates a new array with a different shape; the original array's shape stays unchanged
    # Note that a and d actually share the same underlying data memory
d = a.reshape((2, 2))
print 'a:', a
print 'd:', d
split_line()
def np_list_create():
    # arange creates a 1-D array over [start, end) with the given step: the start is included, the end is not,
    # number of elements: (end - start) / step
    np_lst = np.arange(0, 10, 1)
    print np_lst
    print 'size: %d' % np_lst.shape
    split_line()
    # arithmetic progression
    # linspace(start, end, size): [start, end] includes both the start and the end value, creating size elements in total
    # the endpoint keyword controls whether the end value is included
    print np.linspace(0, 1, 12)
    split_line()
    # geometric progression
    # logspace(start exponent, end exponent, count, base defaults to 10)
print np.logspace(0, 2, 20)
split_line()
def np_list_by_byte():
"""
    frombuffer, fromstring, fromfile and similar functions can create arrays from byte sequences;
    always pass the dtype argument when using them.
    A Python 2 string is really a byte sequence, one byte per character,
    so an 8-bit integer array created from the string s is exactly the ASCII codes of its characters
:return:
"""
s = 'abcdefg'
print np.frombuffer(s, dtype=np.int8)
split_line()
print np.fromstring(s, dtype=np.int8)
split_line()
    # If a 16-bit integer array is created from the string, every two adjacent bytes form one integer;
    # treating bytes 98 and 97 as a single 16-bit integer gives 98*256 + 97 = 25185,
    # which shows that the data is stored little endian (low byte first).
    # The string length must therefore be even
print np.fromstring('abcdefgh', dtype=np.int16)
split_line()
def np_list_by_func():
"""
    Create arrays from a function
:return:
"""
    # fromfunction takes a function plus an iterable (tuple or list) giving the size of each dimension:
    # (10,) means a 1-D array of 10 elements, so the function receives one argument;
    # (5, 6) means a 2-D array of 5 by 6 elements, so the function receives two arguments
print np.fromfunction(lambda x: x + 1, (10,))
print np.fromfunction(lambda x, y: (x + 1) * (y + 1), (5, 6))
split_line()
def np_list_opt():
"""
    Basic operations on numpy arrays are essentially the same as on Python lists
:return:
"""
l = np.arange(10, 1, -1)
print l
    print 'min:', l.min()
    print 'max:', l.max()
    print 'element at index 0:', l[0]
    split_line()
    # Advanced usage (fancy indexing) does not share memory, while the basic operations above do
    print l[np.array([1, 5, 3])]  # index with an ndarray of indices
    print l[[1, 5, 3]]  # index with a list of indices
split_line()
    # boolean filtering
    print l[l > 3]  # directly select the values greater than 3
    print l > 3  # element-wise comparison, returns a boolean array
split_line()
if __name__ == '__main__':
# np_version()
# np_list()
np_list_create()
# np_list_by_byte()
# np_list_by_func()
# np_list_opt()
print np.fromfunction(lambda x: x, (10,))
| mit |
barbour-em/osf.io | tasks.py | 1 | 23428 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import platform
import subprocess
import logging
from invoke import task, run
from website import settings
logging.getLogger('invoke').setLevel(logging.CRITICAL)
HERE = os.path.dirname(os.path.abspath(__file__))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
def get_bin_path():
"""Get parent path of current python binary.
"""
return os.path.dirname(sys.executable)
def bin_prefix(cmd):
"""Prefix command with current binary path.
"""
return os.path.join(get_bin_path(), cmd)
try:
__import__('rednose')
except ImportError:
TEST_CMD = 'nosetests'
else:
TEST_CMD = 'nosetests --rednose'
@task
def server(host=None, port=5000, debug=True, live=False):
"""Run the app server."""
from website.app import init_app
app = init_app(set_backends=True, routes=True)
settings.API_SERVER_PORT = port
if live:
from livereload import Server
server = Server(app.wsgi_app)
server.watch(os.path.join(HERE, 'website', 'static', 'public'))
server.serve(port=port)
else:
app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH])
@task
def apiserver(port=8000, live=False):
"""Run the API server."""
cmd = 'python manage.py runserver {}'.format(port)
if live:
cmd += ' livereload'
run(cmd, echo=True)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
Available variables:
{context}
"""
def make_shell_context():
from modularodm import Q
from framework.auth import User, Auth
from framework.mongo import database
from website.app import init_app
from website.project.model import Node
from website import models # all models
from website import settings
import requests
app = init_app()
context = {
'app': app,
'db': database,
'User': User,
'Auth': Auth,
'Node': Node,
'Q': Q,
'models': models,
'run_tests': test,
'rget': requests.get,
'rpost': requests.post,
'rdelete': requests.delete,
'rput': requests.put,
'settings': settings,
}
try: # Add a fake factory for generating fake names, emails, etc.
from faker import Factory
fake = Factory.create()
context['fake'] = fake
except ImportError:
pass
return context
def format_context(context):
lines = []
for name, obj in context.items():
line = "{name}: {obj!r}".format(**locals())
lines.append(line)
return '\n'.join(lines)
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell():
context = make_shell_context()
banner = SHELL_BANNER.format(version=sys.version,
context=format_context(context)
)
try:
try:
# 0.10.x
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed(banner=banner)
ipshell(global_ns={}, local_ns=context)
except ImportError:
# 0.12+
from IPython import embed
embed(banner1=banner, user_ns=context)
return
except ImportError:
pass
# fallback to basic python shell
code.interact(banner, local=context)
return
@task(aliases=['mongo'])
def mongoserver(daemon=False, config=None):
"""Run the mongod process.
"""
if not config:
platform_configs = {
'darwin': '/usr/local/etc/tokumx.conf', # default for homebrew install
'linux': '/etc/tokumx.conf',
}
platform = str(sys.platform).lower()
config = platform_configs.get(platform)
port = settings.DB_PORT
cmd = 'mongod --port {0}'.format(port)
if config:
cmd += ' --config {0}'.format(config)
if daemon:
cmd += " --fork"
run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient():
"""Run the mongo shell for the OSF database."""
db = settings.DB_NAME
port = settings.DB_PORT
run("mongo {db} --port {port}".format(db=db, port=port), pty=True)
@task
def mongodump(path):
"""Back up the contents of the running OSF database"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongodump --db {db} --port {port} --out {path}".format(
db=db,
port=port,
path=path,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
run(cmd, echo=True)
print()
print("To restore from the dumped database, run `invoke mongorestore {0}`".format(
os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(path, drop=False):
"""Restores the running OSF database with the contents of the database at
the location given its argument.
By default, the contents of the specified database are added to
the existing database. The `--drop` option will cause the existing database
to be dropped.
A caveat: if you `invoke mongodump {path}`, you must restore with
`invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
database dump will be stored.
"""
db = settings.DB_NAME
port = settings.DB_PORT
cmd = "mongorestore --db {db} --port {port}".format(
db=db,
port=port,
pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += " --drop"
cmd += " " + path
run(cmd, echo=True)
@task
def sharejs(host=None, port=None, db_host=None, db_port=None, db_name=None, cors_allow_origin=None):
"""Start a local ShareJS server."""
if host:
os.environ['SHAREJS_SERVER_HOST'] = host
if port:
os.environ['SHAREJS_SERVER_PORT'] = port
if db_host:
os.environ['SHAREJS_DB_HOST'] = db_host
if db_port:
os.environ['SHAREJS_DB_PORT'] = db_port
if db_name:
os.environ['SHAREJS_DB_NAME'] = db_name
if cors_allow_origin:
os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin
if settings.SENTRY_DSN:
os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN
share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
run("node {0}".format(share_server))
@task(aliases=['celery'])
def celery_worker(level="debug"):
"""Run the Celery process."""
cmd = 'celery worker -A framework.tasks -l {0}'.format(level)
run(bin_prefix(cmd))
@task
def rabbitmq():
"""Start a local rabbitmq server.
NOTE: this is for development only. The production environment should start
the server as a daemon.
"""
run("rabbitmq-server", pty=True)
@task(aliases=['elastic'])
def elasticsearch():
"""Start a local elasticsearch server
NOTE: Requires that elasticsearch is installed. See README for instructions
"""
import platform
if platform.linux_distribution()[0] == 'Ubuntu':
run("sudo service elasticsearch start")
elif platform.system() == 'Darwin': # Mac OSX
run('elasticsearch')
else:
print("Your system is not recognized, you will have to start elasticsearch manually")
@task
def migrate_search(delete=False, index=settings.ELASTIC_INDEX):
'''Migrate the search-enabled models.'''
from website.search_migration.migrate import migrate
migrate(delete, index=index)
@task
def mailserver(port=1025):
"""Run a SMTP test server."""
cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
run(bin_prefix(cmd), pty=True)
@task(aliases=['flake8'])
def flake():
run('flake8 .', echo=True)
def pip_install(req_file):
"""Return the proper 'pip install' command for installing the dependencies
defined in ``req_file``.
"""
cmd = bin_prefix('pip install --exists-action w --upgrade -r {} '.format(req_file))
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
return cmd
@task(aliases=['req'])
def requirements(addons=False, release=False, dev=False):
"""Install python dependencies.
Examples:
inv requirements --dev
inv requirements --addons
inv requirements --release
"""
if release or addons:
addon_requirements()
# "release" takes precedence
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev: # then dev requirements
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else: # then base requirements
req_file = os.path.join(HERE, 'requirements.txt')
run(pip_install(req_file), echo=True)
@task
def test_module(module=None, verbosity=2):
"""Helper for running tests.
"""
# Allow selecting specific submodule
module_fmt = ' '.join(module) if isinstance(module, list) else module
args = " --verbosity={0} -s {1}".format(verbosity, module_fmt)
# Use pty so the process buffers "correctly"
run(bin_prefix(TEST_CMD) + args, pty=True)
@task
def test_osf():
"""Run the OSF test suite."""
test_module(module="tests/")
@task
def test_addons():
"""Run all the tests in the addons directory.
"""
modules = []
for addon in settings.ADDONS_REQUESTED:
module = os.path.join(settings.BASE_PATH, 'addons', addon)
modules.append(module)
test_module(module=modules)
@task
def test(all=False, syntax=False):
"""Alias of `invoke test_osf`.
"""
if syntax:
flake()
if all:
test_all()
else:
test_osf()
@task
def test_all(syntax=False):
if syntax:
flake()
test_osf()
test_addons()
karma(single=True, browsers='PhantomJS')
@task
def karma(single=False, sauce=False, browsers=None):
"""Run JS tests with Karma. Requires Chrome to be installed."""
karma_bin = os.path.join(
HERE, 'node_modules', 'karma', 'bin', 'karma'
)
cmd = '{} start'.format(karma_bin)
if sauce:
cmd += ' karma.saucelabs.conf.js'
if single:
cmd += ' --single-run'
# Use browsers if specified on the command-line, otherwise default
# what's specified in karma.conf.js
if browsers:
cmd += ' --browsers {}'.format(browsers)
run(cmd, echo=True)
@task
def wheelhouse(addons=False, release=False, dev=False):
if release:
req_file = os.path.join(HERE, 'requirements', 'release.txt')
elif dev:
req_file = os.path.join(HERE, 'requirements', 'dev.txt')
else:
req_file = os.path.join(HERE, 'requirements.txt')
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
if not addons:
return
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
req_file = os.path.join(path, 'requirements.txt')
if os.path.exists(req_file):
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH)
run(cmd, pty=True)
@task
def addon_requirements():
"""Install all addon requirements."""
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory)
if os.path.isdir(path):
try:
requirements_file = os.path.join(path, 'requirements.txt')
open(requirements_file)
print('Installing requirements for {0}'.format(directory))
cmd = 'pip install --exists-action w --upgrade -r {0}'.format(requirements_file)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
run(bin_prefix(cmd))
except IOError:
pass
print('Finished')
@task
def encryption(owner=None):
"""Generate GnuPG key.
For local development:
> invoke encryption
On Linode:
> sudo env/bin/invoke encryption --owner www-data
"""
if not settings.USE_GNUPG:
print('GnuPG is not enabled. No GnuPG key will be generated.')
return
import gnupg
gpg = gnupg.GPG(gnupghome=settings.GNUPG_HOME, gpgbinary=settings.GNUPG_BINARY)
keys = gpg.list_keys()
if keys:
print('Existing GnuPG key found')
return
print('Generating GnuPG key')
input_data = gpg.gen_key_input(name_real='OSF Generated Key')
gpg.gen_key(input_data)
if owner:
run('sudo chown -R {0} {1}'.format(owner, settings.GNUPG_HOME))
@task
def travis_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path):
try:
open(os.path.join(path, 'local-travis.py'))
run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_addon_settings():
for directory in os.listdir(settings.ADDON_PATH):
path = os.path.join(settings.ADDON_PATH, directory, 'settings')
if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
try:
open(os.path.join(path, 'local-dist.py'))
run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
except IOError:
pass
@task
def copy_settings(addons=False):
# Website settings
if not os.path.isfile('website/settings/local.py'):
print('Creating local.py file')
run('cp website/settings/local-dist.py website/settings/local.py')
# Addon settings
if addons:
copy_addon_settings()
@task
def packages():
brew_commands = [
'update',
'upgrade',
'install libxml2',
'install libxslt',
'install elasticsearch',
'install gpg',
'install node',
'tap tokutek/tokumx',
'install tokumx-bin',
]
if platform.system() == 'Darwin':
print('Running brew commands')
for item in brew_commands:
command = 'brew {cmd}'.format(cmd=item)
run(command)
elif platform.system() == 'Linux':
# TODO: Write a script similar to brew bundle for Ubuntu
# e.g., run('sudo apt-get install [list of packages]')
pass
@task
def npm_bower():
print('Installing bower')
run('npm install -g bower', echo=True)
@task(aliases=['bower'])
def bower_install():
print('Installing bower-managed packages')
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
@task
def setup():
"""Creates local settings, installs requirements, and generates encryption key"""
copy_settings(addons=True)
packages()
requirements(addons=True, dev=True)
encryption()
from website.app import build_js_config_files
from website import settings
# Build nodeCategories.json before building assets
build_js_config_files(settings)
assets(dev=True, watch=False)
@task
def analytics():
from website.app import init_app
import matplotlib
matplotlib.use('Agg')
init_app()
from scripts import metrics
from scripts.analytics import (
logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
modules = (
metrics, logs, addons, comments, folders, links, watch, email_invites,
permissions, profile, benchmarks
)
for module in modules:
module.main()
@task
def clear_sessions(months=1, dry_run=False):
from website.app import init_app
init_app(routes=False, set_backends=True)
from scripts import clear_sessions
clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks
@task
def hotfix(name, finish=False, push=False):
"""Rename hotfix branch to hotfix/<next-patch-version> and optionally
finish hotfix.
"""
print('Checking out master to calculate curent version')
run('git checkout master')
latest_version = latest_tag_info()['current_version']
print('Current version is: {}'.format(latest_version))
major, minor, patch = latest_version.split('.')
next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
print('Bumping to next patch version: {}'.format(next_patch_version))
print('Renaming branch...')
new_branch_name = 'hotfix/{}'.format(next_patch_version)
run('git checkout {}'.format(name), echo=True)
run('git branch -m {}'.format(new_branch_name), echo=True)
if finish:
run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
if push:
run('git push origin master', echo=True)
run('git push --tags', echo=True)
run('git push origin develop', echo=True)
@task
def feature(name, finish=False, push=False):
"""Rename the current branch to a feature branch and optionally finish it."""
print('Renaming branch...')
run('git branch -m feature/{}'.format(name), echo=True)
if finish:
run('git flow feature finish {}'.format(name), echo=True)
if push:
run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
try:
# git-describe doesn't update the git-index, so we do that
# subprocess.check_output(["git", "update-index", "--refresh"])
# get info about the latest tag in git
describe_out = subprocess.check_output([
"git",
"describe",
"--dirty",
"--tags",
"--long",
"--abbrev=40"
], stderr=subprocess.STDOUT
).decode().split("-")
except subprocess.CalledProcessError as err:
raise err
# logger.warn("Error when running git describe")
return {}
info = {}
if describe_out[-1].strip() == "dirty":
info["dirty"] = True
describe_out.pop()
info["commit_sha"] = describe_out.pop().lstrip("g")
info["distance_to_latest_tag"] = int(describe_out.pop())
info["current_version"] = describe_out.pop().lstrip("v")
# assert type(info["current_version"]) == str
assert 0 == len(describe_out)
return info
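# Example of the parsing above (sketch): a describe string such as
# "v0.45.2-3-g<sha>" yields
#   {"current_version": "0.45.2", "distance_to_latest_tag": 3, "commit_sha": "<sha>"}
# plus "dirty": True when the working tree has uncommitted changes.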
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(domain, bits=2048):
cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits)
run(cmd)
@task
def generate_key_nopass(domain):
cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(
domain=domain
)
run(cmd)
@task
def generate_csr(domain):
cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(
domain=domain
)
run(cmd)
@task
def request_ssl_cert(domain):
"""Generate a key, a key with password removed, and a signing request for
the specified domain.
Usage:
> invoke request_ssl_cert pizza.osf.io
"""
generate_key(domain)
generate_key_nopass(domain)
generate_csr(domain)
@task
def bundle_certs(domain, cert_path):
"""Concatenate certificates from NameCheap in the correct order. Certificate
files must be in the same directory.
"""
cert_files = [
'{0}.crt'.format(domain),
'COMODORSADomainValidationSecureServerCA.crt',
'COMODORSAAddTrustCA.crt',
'AddTrustExternalCARoot.crt',
]
certs = ' '.join(
os.path.join(cert_path, cert_file)
for cert_file in cert_files
)
cmd = 'cat {certs} > {domain}.bundle.crt'.format(
certs=certs,
domain=domain,
)
run(cmd)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'website', 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task()
def assets(dev=False, watch=False):
"""Install and build static assets."""
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
@task
def update_citation_styles():
from scripts import parse_citation_styles
total = parse_citation_styles.main()
print("Parsed {} styles".format(total))
| apache-2.0 |
HolgerPeters/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
pchrista/AliPhysics | PWGLF/NUCLEX/Nuclei/NucleiPbPb/macros_pp13TeV/CorrelationFraction.py | 19 | 2196 | import uproot
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
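# Each variation list pairs the label used in the histogram directory name with the
# corresponding cut value (bin shift, DCA, PID n-sigma, TPC clusters or bin width).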
shift_list = [[1, -2], [2, -1], [3, 1], [4, 2]]
dcaxy_list = [[0, 1.0], [1, 1.4]]
dcaz_list = [[0, 0.5], [1, 0.75], [2, 1.25], [3, 1.50]]
pid_list = [[0, 3.25], [1, 3.5]]
tpc_list = [[0, 60], [1, 65], [2, 75], [3, 80]]
width_list = [[4, -2], [2, -1], [1, +1], [3, 2]]
cuts = {"shift": [shift_list, "Bin shift"], "dcaxy": [dcaxy_list, "$DCA_{xy}$ (mm)"], "dcaz": [dcaz_list, "$DCA_{z}$ (cm)"], "pid": [
pid_list, "$n\sigma_{TPC}$"], "tpc": [tpc_list, "TPC clusters"], "width": [width_list, "Bin width"]}
inFile = uproot.open("spectra.root")
normHist = inFile["nuclei_deuterons_/deuterons/9/Joined/JoinedSpectraM9"]
norm = normHist.values[:-3]
pt = [0.5 * (x + y) for x, y in zip(normHist.edges[:-4], normHist.edges[1:-3])]
colors = cm.rainbow(np.linspace(0, 1, len(norm)))
cMap = plt.get_cmap('jet')
cNorm = matplotlib.colors.Normalize(vmin=min(pt), vmax=max(pt))
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cMap)
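# For every cut variation: plot the varied-to-reference spectrum ratio against the cut
# value (colour-coded by pT), print the correlation matrix between cut value and ratio,
# and save the figure as <cut>.pdf.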
for key, record in cuts.items():
x = np.array([])
y = np.array([])
fig,ax = plt.subplots()
for obj_list in record[0]:
obj_label=obj_list[0]
obj_val=obj_list[1]
values = inFile["nuclei_deuterons_{}{}/deuterons/9/Joined/JoinedSpectraM9".format(
key, obj_label)].values[:-3]
values = values / norm
x = np.append(x, np.array([obj_val for _ in range(0, len(values))]))
y = np.append(y, values)
plt.scatter(np.array([obj_val for _ in range(0, len(values))]),
values, color=scalarMap.to_rgba(pt), edgecolors='none')
scalarMap.set_array(pt)
fig.colorbar(scalarMap).set_label("$p_{T}$ (GeV/$c$)")
plt.ylabel("$S_{var}$ / $S_{ref}$")
plt.xlabel(record[1])
ax.tick_params(axis="y",direction="in")
ax.tick_params(axis="x",direction="in")
ax.xaxis.set_label_coords(0.9,-0.07)
ax.yaxis.set_label_coords(-0.115,0.9)
plt.text(0.5, 0.92, 'This work', ha='center', va='center', transform=ax.transAxes, fontweight='bold', fontsize=14)
print(np.corrcoef(x, y))
plt.savefig("{}.pdf".format(key))
# for name, keys in cuts:
| bsd-3-clause |
RPGOne/Skynet | imbalanced-learn-master/examples/under-sampling/plot_cluster_centroids.py | 3 | 1884 | """
=================
Cluster centroids
=================
An illustration of the cluster centroids method.
"""
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.under_sampling import ClusterCentroids
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=5000, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply Cluster Centroids
cc = ClusterCentroids()
X_resampled, y_resampled = cc.fit_sample(X, y)
X_res_vis = pca.transform(X_resampled)
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
label="Class #0", alpha=.5, edgecolor=almost_black,
facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
label="Class #1", alpha=.5, edgecolor=almost_black,
facecolor=palette[2], linewidth=0.15)
ax2.set_title('Cluster centroids')
plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['b', 'g', 'r', 'c']
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(n=n_samples, train_size=train_size,
n_iter=250, random_state=1))
grid.fit(X, y)
scores = [x[1] for x in grid.grid_scores_]
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
Debaq/Triada | FullAxis_GUI/DB/BASE DE DATOS EXPERIMENTO/experimento 3/gaby hernandez/medidor3.py | 27 | 3052 | import argparse
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import json
parser = argparse.ArgumentParser(description="Does some awesome things.")
parser.add_argument('message', type=str, help="pass a message into the script")
args = parser.parse_args(sys.argv[1:])
data = []
New_data=[]
dt=[]
with open(args.message) as json_file:
data = json.load(json_file)
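# The JSON file holds the recorded channels: pitch, roll, yaw, X, Y, Z, time and the
# blue/red LED markers.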
def graph(grid,d_tiempo):
plt.switch_backend('TkAgg') #default on my system
f = plt.figure(num=args.message, figsize=(20,15))
mng = plt._pylab_helpers.Gcf.figs.get(f.number, None)
print(New_data)
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.title(args.message)
if grid == 1:
tempo = d_tiempo
tempo_init = tempo[0]
tempo_end = tempo[-1]
gs1 = GridSpec(4, 1)
gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=0.3, bottom=0.08)
ax1 = plt.subplot(gs1[0, :])
ax1.grid()
ax1.set_ylabel('Pitch',fontsize=8)
if grid ==1:
L1 = ax1.plot(d_tiempo,New_data['pitch'])
else:
L1 = ax1.plot(d_tiempo,data['pitch'])
ax2 = plt.subplot(gs1[1, :])
ax2.grid()
ax2.set_ylabel('Roll',fontsize=8)
if grid ==1:
L1 = ax2.plot(d_tiempo,New_data['roll'])
else:
L1 = ax2.plot(d_tiempo,data['roll'])
ax3 = plt.subplot(gs1[2, :])
ax3.grid()
ax3.set_ylabel('Yaw',fontsize=8)
if grid ==1:
L1 = ax3.plot(d_tiempo,New_data['yaw'])
else:
L1 = ax3.plot(d_tiempo,data['yaw'])
ax4 = plt.subplot(gs1[3, :])
ax4.grid()
ax4.set_ylabel('Tiempo',fontsize=8)
if grid ==1:
L1 = ax4.plot(d_tiempo,New_data['ledblue'])
L2 = ax4.plot(d_tiempo,New_data['ledred'])
else:
L1 = ax4.plot(d_tiempo,data['ledblue'])
L2 = ax4.plot(d_tiempo,data['ledred'])
plt.show()
def find_nearest(array,values):
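    # Return the index of the element of `array` that is closest to `values`.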
idx = np.abs(np.subtract.outer(array, values)).argmin(0)
return idx
def corte(init_cut,end_cut,a,b,c,d,e,f,g,h,i):
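    # Slice every recorded channel to the [init_cut:end_cut] sample range and return
    # them repacked in a dictionary.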
a=a[init_cut:end_cut]
b=b[init_cut:end_cut]
c=c[init_cut:end_cut]
d=d[init_cut:end_cut]
e=e[init_cut:end_cut]
f=f[init_cut:end_cut]
g=g[init_cut:end_cut]
h=h[init_cut:end_cut]
i=i[init_cut:end_cut]
datos={'roll':a,'pitch':b,'yaw':c, 'X':d, 'Y':e, 'Z':f,'time':g, 'ledblue':h, 'ledred':i}
return datos
def reset_tempo(var_in,var_out):
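    # Rebase the time stamps so that the first sample of the cut segment is zero.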
uni = var_in[0]
for t in range(0,len(var_in)):
var_out.append(round((var_in[t]-uni),3))
return var_out
graph(0,data['time'])
init_cut = float(input("tiempo inicial: "))
init_cuty = find_nearest(data['time'],init_cut)
end_cut = float(input("tiempo final: "))
end_cuty = find_nearest(data['time'],end_cut)
New_data=corte(init_cuty,end_cuty,data['pitch'],data['roll'],data['yaw'],data['X'],data['Y'],data['Z'],data['time'],data['ledblue'],data['ledred'])
data = []
print(data)
data = New_data
print(data)
dt = reset_tempo(New_data['time'],dt)
graph(0,dt)
| gpl-3.0 |
alexgorban/models | research/object_detection/dataset_tools/create_oid_tf_record.py | 3 | 5198 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positivelly or negativelly verified is NOT added to tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
with tf.gfile.Open(image_path) as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
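        # Shard deterministically: interpret the hex image ID as an integer modulo num_shards.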
shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
BiaDarkia/scikit-learn | examples/plot_multioutput_face_completion.py | 79 | 2986 | """
==============================================
Face completion with multi-output estimators
==============================================
This example shows the use of multi-output estimators to complete images.
The goal is to predict the lower half of a face given its upper half.
The first column of images shows true faces. The next columns illustrate
how extremely randomized trees, k nearest neighbors, linear
regression and ridge regression complete the lower half of those faces.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
# Load the faces datasets
data = fetch_olivetti_faces()
targets = data.target
data = data.images.reshape((len(data.images), -1))
train = data[targets < 30]
test = data[targets >= 30] # Test on independent people
# Test on a subset of people
n_faces = 5
rng = check_random_state(4)
face_ids = rng.randint(test.shape[0], size=(n_faces, ))
test = test[face_ids, :]
n_pixels = data.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
# Fit estimators
ESTIMATORS = {
"Extra trees": ExtraTreesRegressor(n_estimators=10, max_features=32,
random_state=0),
"K-nn": KNeighborsRegressor(),
"Linear regression": LinearRegression(),
"Ridge": RidgeCV(),
}
y_test_predict = dict()
for name, estimator in ESTIMATORS.items():
estimator.fit(X_train, y_train)
y_test_predict[name] = estimator.predict(X_test)
# Plot the completed faces
image_shape = (64, 64)
n_cols = 1 + len(ESTIMATORS)
plt.figure(figsize=(2. * n_cols, 2.26 * n_faces))
plt.suptitle("Face completion with multi-output estimators", size=16)
for i in range(n_faces):
true_face = np.hstack((X_test[i], y_test[i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 1,
title="true faces")
sub.axis("off")
sub.imshow(true_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
for j, est in enumerate(sorted(ESTIMATORS)):
completed_face = np.hstack((X_test[i], y_test_predict[est][i]))
if i:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j)
else:
sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j,
title=est)
sub.axis("off")
sub.imshow(completed_face.reshape(image_shape),
cmap=plt.cm.gray,
interpolation="nearest")
plt.show()
| bsd-3-clause |
justincassidy/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
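# transpose the per-classifier result tuples into parallel lists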
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
c-wilson/klustaviewa | klustaviewa/views/tests/test_projectionview.py | 2 | 1598 | """Unit tests for projection view."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import numpy as np
import numpy.random as rnd
import pandas as pd
from kwiklib.dataio import KlustersLoader
from kwiklib.dataio.selection import select
from kwiklib.dataio.tools import check_dtype, check_shape
from klustaviewa import USERPREF
from klustaviewa.views import ProjectionView
from klustaviewa.views.tests.utils import show_view, get_data, assert_fun
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_projectionview():
kwargs = {}
kwargs['operators'] = [
lambda self: assert_fun(self.view.get_projection(0) == (0, 0)),
lambda self: assert_fun(self.view.get_projection(1) == (0, 1)),
lambda self: self.view.select_channel(0, 5),
lambda self: self.view.select_feature(0, 1),
lambda self: self.view.select_channel(1, 32),
lambda self: self.view.select_feature(1, 2),
lambda self: assert_fun(self.view.get_projection(0) == (5, 1)),
lambda self: assert_fun(self.view.get_projection(1) == (32, 2)),
lambda self: (self.close()
if USERPREF['test_auto_close'] != False else None),
]
kwargs['fetdim'] = 3
kwargs['nchannels'] = 32
kwargs['nextrafet'] = 3
# Show the view.
show_view(ProjectionView, **kwargs)
| bsd-3-clause |
Kate-Willett/HadISDH_Build | F10_MissingAdjUnc_AdjPlots.py | 1 | 30317 | # PYTHON 3
# >module load scitools/default-current
#
# Author: Kate Willett
# Created: 1 February 2013 (IDL)
# Last update: 1 December 2020 (Python 3 from 1st Dec 2020)
# Location: /home/h04/hadkw/HadISDH_Code/HADISDH_BUILD/
# GitHub: https://github.com/Kate-Willett/HadISDH_Build
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code finds the missed adjustment uncertainty for each variable by looking at a gaussian fit to the distribution of adjustments
# and the actual distribution. The missed adjustment uncertainty is the standard deviation of the difference.
#
# This code looks at all of the adjustments allocated to all stations for a variable
# It plots a distribution of adjustment magnitude and a time series of adjustment frequency.(optional plot output)
# It also assesses the mean and standard deviation of the adjustments in absolute and actual space (also median)
# It also fits a gaussian to the distribution, adjusts it to include the 'fatter tails' and then
# takes the difference between the two to identify the 'missing adjustments / missing middle'
# The standard deviation of the missed adjustment distribution is taken as the 1 sigma uncertainty
# remaining in the data from missed adjustments
#
# Mean number of changepoints and distribution stats are output to file for read in by other programs
#
# Plots are output and a list of stations and adjustments from largest to smallest.
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
# <List of program modules required to run the code, or link to compiler/batch file>
#
# -----------------------
# DATA
# -----------------------
# inlist: list of stations to work through
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/goodforHadISDH.'+version+'_PHAdpd.txt'
# NB - IF YOU RERUN LATER YOU WILL HAVE TO SWAP TO USING _KeptLarge.txt!!!
# inlog: list of adjustment locations and magnitudes for each station
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/HadISDH.landDPD.'+version+'_PHA.log'
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Go through everything in the 'Start' section to make sure dates, versions and filepaths are up to date
# If we're using the Config file (F1_HadISDHBuildConfig.txt) then make sure that is correct.
#>./F10_submit_spice.bash - this will run all variables so you will have to comment out any you do not wish to run
#or
#>module load scitools/default-current # for Python 3
#>python F10_MissingAdjUncPlots.py --var <var>
#
# <var> can be q, rh, t, td, tw, e, dpd
#
# -----------------------
# OUTPUT
# -----------------------
# outplots:
# /scratch/hadkw/UPDATE<YYYY>/IMAGES/BUILD/HadISDH.landDPD.'+versiondots+'_adjspread_PHA.eps'
# outadjs: list of stations and adjustments from largest to smallest
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/Largest_Adjs_landDPD.'+versiondots+'_PHA.txt'
# outstats: for each variable/homogtype a list of stats is output
# /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/Adjs_Stats.'+versiondots+.txt'
#
#     /scratch/hadkw/UPDATE<YYYY>/LISTS_DOCS/Adjs_Stats.'+versiondots+'.txt'
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 4 (1 December 2020)
# ---------
#
# Enhancements
#
# Changes
# This is now Python 3 rather than IDL
#
# Bug fixes
#
#
# Version 3 (6 February 2018)
# ---------
#
# Enhancements
# This program now finds the number of stations in the candidate variable station
# list so that it does not need to be put in manually before running.
# Next I should pull out the variable and homogtype so that it is stated
# at the command line rather than changed within the file before running.
#
# Now has param (variable) and homogtype called at the command line so you only need to edit the file once per year
#
# This now outputs a list of stats for each variable/homogtype to file (appends) so that other programs can read in
#
# This now has a 'KeptLarge' true/false switch to easily switch when running after the first inital run where you need
# to use the _KeptLarge.txt station lists
#
# Changes
#
# Bug fixes
#
# Version 2 (27 January 2017)
# ---------
#
# Enhancements
# General tidy up and move of all editables to the top
#
# Changes
#
# Bug fixes
#
# Version 1 (15 January 2015)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
# USE python3
# module load scitools/default-current
# python F10_MissingAdjUnc_AdjPlots.py --var <var>
#
# For debugging
# ipython
# %pdb
# %run F10_MissingAdjUnc_AdjPlots.py <var>
#
# REQUIRES
# ReadNetCDF.py
#
#************************************************************************
# Set up python imports
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
#from lmfit import Model
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi
import struct
import pdb
#import netCDF4 as nc4
from subprocess import check_output
from subprocess import call
#import ReadNetCDF
#from GetNiceTimes import MakeDaysSince
# Start and end years if HardWire = 1
styear = 1973
edyear = 2019
# Dataset version if HardWire = 1
versiondots = '4.2.0.2019f'
version = 'v420_2019f'
hadisdversiondots = '3.1.0.2019f'
hadisdversion = 'v310_2019f'
# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0
if (HardWire == 0):
    # Read in the config file to get all of the info
with open('F1_HadISDHBuildConfig.txt') as f:
ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
versiondots = ConfigDict['VersionDots']
hadisdversiondots = ConfigDict['HadISDVersionDots']
styear = ConfigDict['StartYear']
edyear = ConfigDict['EndYear']
# Do you want to run for plots only - not output stats?
PlotOnly = False # False = plots and output stats, True = plots only
# Growth Factor for Pseudomax of Gaussian Fitting
GrowthFactor = 1.5 # Tried 1.2, bit small
# Do you want to run for a second time (e.g., for PlotOnly = True) where station lists have already been saved as _KeptLarge.txt?
KeptLarge = False # TRUE (run with _KeptLarge station lists or FALSE (normal, first time run)
if (KeptLarge):
KL = '_KeptLarge'
else:
KL = ''
# Set up directories locations
updateyy = str(edyear)[2:4]
updateyyyy = str(edyear)
workingdir = '/scratch/hadkw/UPDATE'+updateyyyy
#workingdir = '/data/users/hadkw/WORKING_HADISDH/UPDATE'+updateyyyy
OUTPLOTDIR = workingdir+'/IMAGES/BUILD/'
# Set up filenames
INDIR = workingdir+'/LISTS_DOCS/'
OUTPUTLOG = workingdir+'/LISTS_DOCS/OutputLogFile'+versiondots+'.txt'
#OUTSTATS = workingdir+'/LISTS_DOCS/Adjs_Stats.'+versiondots+'.txt'
# Set up variables
MDI = -1e+30
#*** at some point add all the header info from the new HadISD files***
# Dictionaries for param, units, homogtype, binsize
ParamDict = dict([('q',['q','g/kg','IDPHA','PHA', 0.05]),
('rh',['RH','%rh','IDPHA','PHA', 0.5]),
('t',['T','deg C','IDPHA','PHA', 0.05]), # Note this needs to be changed to IDPHAMG later
('td',['Td','deg C','PHADPD','PHA', 0.1]),
('tw',['Tw','deg C','IDPHA','PHA', 0.05]),
('e',['e','hPa','IDPHA','PHA', 0.05]),
('dpd',['DPD','deg C','PHA','PHA', 0.1])])
#************************************************************************
# Subroutines
#************************************************************************
# READDATA
def ReadData(FileName,typee,delimee):
''' Use numpy genfromtxt reading to read in all rows from a complex array '''
''' Need to specify format as it is complex '''
''' outputs an array of tuples that in turn need to be subscripted by their names defaults f0...f8 '''
return np.genfromtxt(FileName, dtype=typee, delimiter=delimee, encoding='latin-1') # ReadData
# return np.genfromtxt(FileName, dtype=typee, delimiter=delimee) # ReadData
#************************************************************************
# GAUSSIAN
def Gaussian(x, amp, cen, wid):
' A Gaussian model for curve fitting '
' x is the x axis points '
' amp is the maximum amplitude '
' cen is the mean '
' wid is the standard deviation '
return amp * exp(-(x-cen)**2 / (2*wid**2))
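# Usage sketch: main() below fits this model to the adjustment histogram with scipy's
# curve_fit, e.g. curve_fit(Gaussian, Xarr, NewHistAdjMagsAct, p0=[MaxFreq, MeanAdj, StdAdj]),
# adding bounds on the parameters when a dip ("missing middle") has been detected.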
#****************************************************************************
# GETADJUSTMENTARRAYS
def GetAdjustmentArrays(INFIL, MCount, SCount, HType, WMOList, WBANList):
' This opens the adjustment list, searches for the adjustments for each station, adds them to appropriate lists '
' Inputs: '
' INFIL = string filepath and name for list of adjustments '
' MCount = integer number of months in time series '
' SCount = integer number of stations in dataset '
' HType = string homogtype eg. PHA, IDPHA, PHADPD '
' WMOList = list of WMO IDs '
' WBANList = list of WBAN IDs '
' Returns: '
' Adj_Locs = MCount integer array counting number of changepoints at each time point across entire dataset '
' Adj_Mags_Accum = Float array of all adjustments as read from file - accumalated magnitudes '
' Adj_Mags_Act = Float array of all adjustments as actual individual magnitudes '
' Adj_WMOs = integer array of WMO IDs to match each adjustment listed in Adj_Mags_Act and Adj_Mags_Accum '
GAdj_Locs = np.repeat(0,MCount) # integer array for storing locations of changepoints
GAdj_Mags_Accum = [] # grow this array on the fly
GAdj_Mags_Act = []
GAdj_WMOs = []
GAdj_WBANs = []
# # Gunzip PHA output file
print(INFIL)
# call(['gunzip',INFIL+'.gz'])
# loop through station by station to get all of the adjustments and locations
for st in range(SCount):
# find homog adj for this station and append to array
if (HType == 'PHA'):
#PHA - 0=ID, 3=stmon,6=edmon, 8=ibreak, 9=cbreak, 10=adj, 11=eadj
edmonget = 6
adjget = 10
moo = check_output(['grep','-a','^Adj write:'+WMOList[st]+WBANList[st],INFIL])
# Now sort out this string which is a byte array
# This creates a list of strings for each adjustment with a blank string at the beginning
moo = moo.decode("utf-8").split('Adj write:')
# Remove the blank string
moo.remove('')
elif (HType == 'IDPHAMG') or (HType == 'PHADPD'):
#IDPHAMG - 0=ID, 2=stmon, 3=edmon, 6=adj, 7=eadj, 8=adj source indicator
edmonget = 3
adjget = 6
moo = check_output(['grep','-a','^'+WMOList[st]+WBANList[st],INFIL])
# Now sort out this string which is a byte array
# This creates a list of strings for each adjustment with a blank string at the beginning
moo = moo.decode("utf-8").split('\n') # no space
# Remove the blank string at the end
moo.remove('')
else:
#IDPHA - 0=ID, 2=stmon, 3=edmon, 6=adj, 7=eadj
edmonget = 3
adjget = 6
moo = check_output(['grep','-a','^'+WMOList[st]+WBANList[st],INFIL])
# Now sort out this string which is a byte array
# This creates a list of strings for each adjustment with a blank string at the beginning
moo = moo.decode("utf-8").split(' \n')
# Remove the blank string at the end
moo.remove('')
# Strip the \n newline characters, random letters and split the strings to make a list of lists
# b, i, p in IDPHAMG
moo = [i.strip(' ABCDEFGHIJKLMNOPQRSTUVWXYZbip\n').split() for i in moo]
# print('Check adjustment read in')
# pdb.set_trace()
# Good for IDPHA and PHA
# Now loop through the adjustments to append to array
AdjVals = []
# Ignore first line as this is most recent period so adjustment is 0
for rec,adjstr in enumerate(moo[1:]):
Adj = -(np.copy(np.float(adjstr[adjget])))
#print(Adj)
# Break location
Loc = np.int(adjstr[edmonget]) - 1
GAdj_Locs[Loc] += 1 # increment this location by 1
GAdj_Mags_Accum.append(Adj)
AdjVals.append(Adj)
GAdj_WMOs.append(WMOList[st])
GAdj_WBANs.append(WBANList[st])
# Now get the actual adjustments from AdjVals
if (rec == 0):
GAdj_Mags_Act.append(AdjVals[0])
else:
GAdj_Mags_Act.append(AdjVals[rec] - AdjVals[rec-1])
# print('Check adjustment arrays')
# pdb.set_trace()
# Good for IDPHA
# # gzip PHA output file for tidiness
# call(['gzip',INFIL])
return GAdj_Locs, GAdj_Mags_Accum, GAdj_Mags_Act, GAdj_WMOs, GAdj_WBANs
#******************************************************************
# GETHIST
def GetHist(BinSz, GAdj_Mags_Act):
' Works out the things needed for creating a histogram of adjustments and returns histogram and stats '
' Inputs: '
' BinSz = float size of bins '
' GAdj_Mags_Act = Float array of actual adjustment magnitudes '
' Returns: '
' GHistAdjsMagsAct = Histogram of adjustments - Int array of counts for each point of histogram '
' GXarr = Float array of bin midpoints '
' GBinArr = Float array of bin points from leftmost to include final rightmost '
' GMeanAdj = float mean of all adjustment magnitudes '
' GStdAdj = float standard deviation of all adjustment magnitudes '
' GMaxFreq = int maximum count for a bin within the histogram '
# Set up the bins for the histogram
minX = np.floor(np.min(GAdj_Mags_Act)) - (BinSz/2.)
maxX = np.ceil(np.max(GAdj_Mags_Act)) + (BinSz/2.)
# Make the bins symmetrical - not quite sure why - its nice?
if (abs(minX) > maxX):
maxX = abs(minX)
else:
minX = -(maxX)
# np.linspace better for floats than np.arange which gives inconsistent results for some reason
GBinArr = np.linspace(minX,maxX,np.round(((maxX - minX) / BinSz)) + 1)
GXarr = GBinArr[0:-1] + (BinSz/2.)
# Get the histogram of the Actual Adjustment Magnitudes Adj_Mags_Act
HistRes = plt.hist(GAdj_Mags_Act,bins=GBinArr) # [0] = histogram, [1] = bins
GHistAdjMagsAct = HistRes[0]
# Find the mean and standard deviation of adjustments
GMeanAdj = np.mean(GAdj_Mags_Act)
GStdAdj = np.std(GAdj_Mags_Act)
# Find the max value in the histogram
GMaxFreq = np.max(GHistAdjMagsAct)
# # CHECK THIS!!!
# print('Check Bins')
# pdb.set_trace()
return GHistAdjMagsAct, GXarr, GBinArr, GMeanAdj, GStdAdj, GMaxFreq
#*****************************************************************
# GETNEWMIDDLE
def GetNewMiddle(GHistAdjMagsAct, GXarr, BinSz, GMeanAdj, GMaxFreq):
' Find out if there is a dip or plateau in the middle '
' This depends on well behaved histograms that do not ahve any unusual peaks other than a missing middle. '
' 1. Split histogram below and =/above the mean '
' 2. Find local max of lower half and upper half '
' 3. Set all lower values (if any) after local max in lower half to max of histogram '
' - if there are lower values then set FixLow = True '
' 4. Set all lower values (if any) before local max in upper half to max of histogram '
' - if there are lower values then set FixHigh = True '
' - if there are lower values then set first point of upper value (closest to mean) to GrowthFactor * max of histogram (pseudomax) '
' 5. If there is no dip in upper values, so no pseudomax set, then if there is a dip in the lower half: '
' - set last point of lower half (closest to mean) to GrowthFactor * max of histogram (pseudomax) '
' 6. Merge new low and high values and return '
' Inputs: '
' GHistAdjMagsAct = Int array of histogram of adjustment magnitudes '
' GXarr = float array of bin midpoints '
' BinSz = float bin size '
' GMeanAdj = float mean of adjustment magnitudes '
' GMaxFreq = int max of histogram '
' Returns: '
' GNewHistAdjMagsAct = float array of adjustment magnitudes with missing middle set to max values '
' FixLow = True if a dip is found, False if not '
' FixHigh = True if a dip is found, False if not '
FixLow = False
FixHigh = False
# Search for max in everything below the mean
LowHalf = GHistAdjMagsAct[np.where((GXarr + (BinSz / 2.)) < GMeanAdj)]
LowHalfMax = np.max(LowHalf)
GotMax = np.where(LowHalf == LowHalfMax)[0]
# pdb.set_trace()
# If the max is before the end of the array then set all after max to max
if (GotMax[0] < (len(LowHalf)-1)):
LowHalf[GotMax[0]+1:] = GMaxFreq
FixLow = True
# print('Found dip in low half')
# Search for max in everything above the mean
HighHalf = GHistAdjMagsAct[np.where((GXarr + (BinSz / 2.)) >= GMeanAdj)]
HighHalfMax = np.max(HighHalf)
GotMax = np.where(HighHalf == HighHalfMax)[0]
# pdb.set_trace()
# If the max is after the beginning of the array then set all before max to max and first value (closest to mean) to max*GrowthFactor
if (GotMax[0] > 0):
HighHalf[:GotMax[0]] = GMaxFreq # do not need + 1 here because I don't want to reset the local maximum
HighHalf[0] = GMaxFreq * GrowthFactor
FixHigh = True
# print('Found dip in high half - setting pseudomax')
    # Run a check that if no pseudomax value has been set in HighHalf because there is no dip then one should be set in LowHalf if there is a dip there
elif (FixLow):
LowHalf[-1] = GMaxFreq * GrowthFactor
# print('Set pseudomax in low half')
# Merge Low and High half to create new histogram with completed middle
GNewHistAdjMagsAct = np.append(LowHalf,HighHalf)
# print('Check LowHalf and HighHalf')
# pdb.set_trace()
# Might need some threshold to detect the dip - if just one or two values is it really?
# For q IDPHA only one value in dip but fit is better with pseudomax set.
return GNewHistAdjMagsAct, FixLow, FixHigh
#***********************************************************
# OUTPUTSTATS
def OutPutStats(OUTLOG, OUTADJS, GAdj_Mags_Act, GDiffValsArr, GAdj_WMOs, GAdj_WBANs, GVar, HType, SCount):
' Write out statistics to OUTPUTLOG and OUTADJS '
' Inputs: '
' OUTLOG = string filepath and name for OUTPUTLOG '
' OUTLIST = string filepath and name for OUTADJS '
' GAdj_Mags_Act = float array of all adjustment magnitudes '
' GDiffValsArr = float array of missing adjustments '
' GAdj_WMOs = string array of WMO IDs for each of GAdj_Mags_Act '
' GAdj_WBANs = string array of WBAN IDs for each of GAdj_Mags_Act '
' GVar = string varname '
' HType = string homogtype '
' SCount = integer number of stations '
' Returns: '
' Nothing '
# Get the sorted absolute adjustments
Abs_Adj_Mags_Act = np.sort(abs(np.array(GAdj_Mags_Act)))
filee = open(OUTLOG,'a+')
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_MEAN=',np.mean(Abs_Adj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_MEDIAN=',np.median(Abs_Adj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_ABS_STD=',np.std(Abs_Adj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN=',np.mean(GAdj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEDIAN=',np.median(GAdj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_STD=',np.std(GAdj_Mags_Act)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN_GAUSSDIFFS=',np.mean(GDiffValsArr)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_STD_GAUSSDIFFS=',np.std(GDiffValsArr)))
filee.write('%s%s%s%s%.3f\n' % (GVar,'_',HType,'_MEAN_ADJ_NO=',len(GAdj_Mags_Act) / float(SCount)))
filee.close()
# Get sorted array of adjustments for kicking out largest
GAdj_Mags_Act = np.array(GAdj_Mags_Act)
OrderAdj = np.flip(np.argsort(abs(GAdj_Mags_Act))) # gives the index of the sorted values, largest to smallest
# pdb.set_trace()
# Sorted_Adj_Mags_Act = np.flip(np.sort(GAdj_Mags_Act)) # now sorted and largest to smallest
Sorted_Adj_Mags_Act = GAdj_Mags_Act[OrderAdj] # now sorted and largest to smallest
# Print out these in order
filee = open(OUTADJS,'a+')
for i, AdjVal in enumerate(Sorted_Adj_Mags_Act):
stradj = '% 7.2f' % AdjVal
filee.write('%s%s%s%s\n' % (GAdj_WMOs[OrderAdj[i]],GAdj_WBANs[OrderAdj[i]],' ', stradj))
filee.close()
return
#********************************************************************************
def PlotAdjs(OutFile, GHistAdjMagsAct, GGaussCurve, GMergeFit, GDiffArr, GDiffValsArr, GXarr, GAdj_Locs, MCount, StYr, EdYr, Unit):
''' Plot histogram of adjustments and estimates for missing middle '''
''' Plot time series of location of changepoints '''
# Set up for 2 panel plot
plt.clf()
f,axarr=plt.subplots(2,figsize=(7,10),sharex=False) #6,18
# Set up the historgram
axarr[0].set_position([0.1,0.6,0.85,0.35])
axarr[0].set_xlim([GXarr[0],GXarr[-1]])
axarr[0].plot(GXarr,GHistAdjMagsAct,'.',c='black') #linewidth=0.25)
axarr[0].plot(GXarr,GGaussCurve,c='lightgrey') #,linewidth=0.25)
axarr[0].plot(GXarr,GMergeFit,c='red') #, linewidth=0.25)
axarr[0].plot(GXarr,GDiffArr,':',c='blue') #,linewidth=0.25)
axarr[0].annotate('a)',xy=(0.03,0.9), xycoords='axes fraction',size=12)
MnDfs = '%7.3f' % np.mean(GDiffValsArr)
axarr[0].annotate('Mean of Diffs: '+MnDfs,xy=(0.03,0.8), xycoords='axes fraction',size=14)
StdDfs = '%7.3f' % np.std(GDiffValsArr)
axarr[0].annotate('Std of Diffs: '+StdDfs,xy=(0.03,0.7), xycoords='axes fraction',size=14)
axarr[0].set_xlabel('Adjustment Magnitude '+Unit,size=16)
axarr[0].set_ylabel('Frequency',size=16)
# Set up the time series
TheMonths = []
yr = int(StYr)
mon = 1
for m in range(MCount):
TheMonths.append(dt.date(yr,mon,1))
mon=mon+1
if mon == 13:
mon = 1
yr = yr + 1
axarr[1].set_position([0.1,0.1,0.85,0.35])
axarr[1].set_xlim([TheMonths[0],TheMonths[-1]])
axarr[1].plot(TheMonths,GAdj_Locs,c='black') #, linewidth=0.25)
axarr[1].annotate('b)',xy=(0.03,0.9), xycoords='axes fraction',size=12)
axarr[1].set_xlabel('Time',size=16)
axarr[1].set_ylabel('Frequency',size=16)
#plt.show()
plt.savefig(OutFile+".eps")
plt.savefig(OutFile+".png")
return #PlotHomogTS
#******************************************
# MAIN
#************************************************
def main(argv):
# INPUT PARAMETERS AS STRINGS!!!!
var = 'q' # 'q','rh','e','td','tw','t','dpd'
try:
opts, args = getopt.getopt(argv, "hi:",
["var="])
except getopt.GetoptError:
print('Usage (as strings) F10_MissingAdjUnc_AdjPlots.py --var <q>')
sys.exit(2)
for opt, arg in opts:
if opt == "--var":
try:
var = arg
except:
sys.exit("Failed: var not a string")
# HARDWIRE THIS IF YOU WANT TO ONLY LOOK AT PHA!!!
homogtype = ParamDict[var][2] # THIS IS THE OPERATIONAL RUN (IDPHA, IDPHAMG, PHA, PHADPD)
print('WORKING ON: ',var, homogtype)
# pdb.set_trace()
# Collect all of the adjustment information
# Now set up files
var2 = ParamDict[var][0] #'DPD','RH','Td','T','Tw','e','q
INSTATLIST = INDIR+'goodforHadISDH.'+versiondots+'_'+homogtype+var+KL+'.txt'
# INSTATLIST = INDIR+'goodforHadISDH.'+versiondots+'_'+homogtype+var+'_JAN2020'+KL+'.txt'
# T IDPHA is Merged with PHA so homogtype is now IDPHAMG
if (var == 't'):
homogtype = homogtype+'MG'
INADJLIST = INDIR+'HadISDH.land'+var2+'.'+versiondots+'_'+homogtype+'.log'
OUTPLOTS = OUTPLOTDIR+'HadISDH.land'+var2+'.'+versiondots+'_adjspread_'+homogtype
OUTADJS = INDIR+'Largest_Adjs_land'+var2+'.'+versiondots+'_'+homogtype+'.txt'
# read in station list
MyTypes = ["|U6","|U5","float","float","float","|U4","|U30","|U7","int"]
#MyTypes = ("|S6","|S5","float","float","float","|S4","|S30","|S7","int")
MyDelimiters = [6,5,8,10,7,4,30,7,5]
RawData = ReadData(INSTATLIST,MyTypes,MyDelimiters)
StationListWMO = np.array(RawData['f0'])
StationListWBAN = np.array(RawData['f1'])
StationListLat = np.array(RawData['f2'])
StationListLon = np.array(RawData['f3'])
StationListElev = np.array(RawData['f4'])
StationListCID = np.array(RawData['f5'])
StationListName = np.array(RawData['f6'])
nstations = len(StationListWMO)
# Set up arrays
nyrs = (int(edyear)+1)-int(styear)
nmons = nyrs*12
# int_mons = indgen(nmons)
# Read in adjustments into arrays
Adj_Locs, Adj_Mags_Accum, Adj_Mags_Act, Adj_WMOs, Adj_WBANs = GetAdjustmentArrays(INADJLIST, nmons, nstations, homogtype, StationListWMO, StationListWBAN)
# Calculate the required statistics
# Very difficult to get a best fit gaussian to data with a missing middle as we have to assume something about the middle
# Missing middle doesn't appear to be an issue for all - IDPHA for q and T appears to fill in, less so for RH
# Get histogram of adjustments
# Find middle - if there is a dip select all values between local peaks
# Set these to the maximum of the other remaining values
# Set the middle of the dip to GrowthFactor*max of remaining values to improve the amplitude gaussian fit.
# Find the max of the full histogram and mean and standard deviation of all adjustments
# Find the best fit curve for the nanned version - set amp=np.max, cen=mean(adj), wid=sd(adj), set bounds to max+max*GrowthFactor (20%), mean+-0.1,st+/-0.2
# It will likely fit to given lower bound
# Get the curve using gaussian model
# CHECK THE CURVE!!!
# This sounds like a fudge but I set the middle value in IDL to ~4000 so its the same thing.
# Get the histogram of adjustments for dataset
BinSize = ParamDict[var][4]
HistAdjMagsAct, Xarr, BinArray, MeanAdj, StdAdj, MaxFreq = GetHist(BinSize, Adj_Mags_Act)
# print('Hist stats: ',MaxFreq, MeanAdj, StdAdj)
# Find out if there is a dip or plateau in the middle
# This depends on well-behaved histograms that don't have any unusual peaks other than a missing middle.
NewHistAdjMagsAct, FixingLow, FixingHigh = GetNewMiddle(HistAdjMagsAct, Xarr, BinSize, MeanAdj, MaxFreq)
# print('Did we need to fix middle? L? H?',FixingLow, FixingHigh)
# If there is no dip or plateau just fit a gaussian anyway?
# Get a best fit gaussian curve, with bounds if FixingLow or FixingHigh is True
if (FixingLow) or (FixingHigh):
# Add dip info to log file
if (PlotOnly == False): # then we're getting all the stats
filee = open(OUTPUTLOG,'a+')
filee.write('%s%s%s%s\n' % (var,'_',homogtype,'_DIP_FOUND=True'))
filee.close()
bv, covar = curve_fit(Gaussian,Xarr,NewHistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj],bounds=([MaxFreq,MeanAdj-1.,StdAdj-1.],[MaxFreq*GrowthFactor,MeanAdj+1,StdAdj+1],))
bvo, covaro = curve_fit(Gaussian,Xarr,HistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj])
# print('Running with bounds because we found a dip')
GaussCurve = Gaussian(Xarr,bv[0],bv[1],bv[2])
GaussCurveOLD = Gaussian(Xarr,bvo[0],bvo[1],bvo[2])
else:
# Add dip info to log file
if (PlotOnly == False): # then we're getting all the stats
filee = open(OUTPUTLOG,'a+')
filee.write('%s%s%s%s\n' % (var,'_',homogtype,'_DIP_FOUND=False'))
filee.close()
bv, covar = curve_fit(Gaussian,Xarr,NewHistAdjMagsAct,p0=[MaxFreq,MeanAdj,StdAdj])
GaussCurve = Gaussian(Xarr,bv[0],bv[1],bv[2])
# print('Check bv output for amp, mean, std and the best fit curve')
# pdb.set_trace()
# plt.clf()
# plt.plot(Xarr,NewHistAdjMagsAct,'+')
# plt.plot(Xarr,HistAdjMagsAct,'.')
# plt.plot(Xarr,GaussCurve,'red')
# if (FixingLow) or (FixingHigh):
# plt.plot(Xarr,GaussCurveOLD,'grey')
# plt.show()
# pdb.set_trace()
# Merge the GaussCurve and HistAdjMagsAct to pick up the maximum values in each case - the fatter tails of HistAdjMagsAct and the Missing Middle?
MergeFit = np.maximum(HistAdjMagsAct,GaussCurve)
# Get the difference between the MergeFit and HistAdjMagsAct
DiffArr = MergeFit - HistAdjMagsAct
# print('Check what happens when HistAdjMagsAct = 0 and the DiffArr')
# plt.clf()
# plt.plot(Xarr,NewHistAdjMagsAct,'+')
# plt.plot(Xarr,HistAdjMagsAct,'.')
# plt.plot(Xarr,GaussCurve,'red')
# plt.plot(Xarr,MergeFit,'blue')
# plt.plot(Xarr,DiffArr,'orange')
# plt.show()
# pdb.set_trace()
# Convert the differences into values using the BinArray locations and numbers in DiffArr
# DiffArr should not be less that 0 in any place
DiffValsArr=[]
for i,Num in enumerate(DiffArr):
print(i,Num)
if (np.round(Num) > 0):
DiffValsArr = np.append(DiffValsArr,np.repeat((BinArray[i] + (BinSize / 2.)),np.round(Num)))
# print('Check setting diff vals')
# pdb.set_trace()
# Output these stats if PlotOnly is False
if (PlotOnly == False):
OutPutStats(OUTPUTLOG, OUTADJS, Adj_Mags_Act, DiffValsArr, Adj_WMOs, Adj_WBANs, var, homogtype, nstations)
# Make the Plots
PlotAdjs(OUTPLOTS, HistAdjMagsAct, GaussCurve, MergeFit, DiffArr, DiffValsArr, Xarr, Adj_Locs, nmons, styear, edyear, ParamDict[var][1])
# Finish
if __name__ == '__main__':
main(sys.argv[1:])
| cc0-1.0 |
TheChymera/chr-helpers | chr_matplotlib.py | 1 | 2908 | __author__="Paul H, Horea Christian, Leonor Carcia Gutierrez"
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def auto_remap(data):
start=0
midpoint=0.5
stop=1.0
if np.nanmin(data) >= 0:
raise ValueError('You do not need to rescale your cmap to center zero.')
if np.nanmax(data) > abs(np.nanmin(data)):
start = (np.nanmax(data)-abs(np.nanmin(data)))/(2*np.nanmax(data))
midpoint = abs(np.nanmin(data))/(np.nanmax(data)+abs(np.nanmin(data)))
stop = 1.0
if np.nanmax(data) == abs(np.nanmin(data)):
start = 0
midpoint = 0.5
stop = 1.0
if np.nanmax(data) < abs(np.nanmin(data)):
start = 0
midpoint = abs(np.nanmin(data))/(np.nanmax(data)+abs(np.nanmin(data)))
stop = (abs(np.nanmin(data))+np.nanmax(data))/(2*abs(np.nanmin(data)))
return start, midpoint, stop
def remappedColorMap(cmap, data=False, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
'''
Function to offset the midpoint of a colormap and scale the
remaining color range. Useful for data with a negative minimum and
positive maximum where you want the middle of the colormap's dynamic
range to be at zero.
Input
-----
cmap : The matplotlib colormap to be altered
data: You can provide your data as a numpy array, and the following
operations will be computed automatically for you.
start : Offset from the lowest point in the colormap's range.
Defaults to 0.0 (no lower offset). Should be between
0.0 and 0.5; if your dataset vmax <= abs(vmin) you should leave
this at 0.0, otherwise set it to (vmax-abs(vmin))/(2*vmax)
midpoint : The new center of the colormap. Defaults to
0.5 (no shift). Should be between 0.0 and 1.0; usually the
optimal value is abs(vmin)/(vmax+abs(vmin))
stop : Offset from the highest point in the colormap's range.
Defaults to 1.0 (no upper offset). Should be between
0.5 and 1.0; if your dataset vmax >= abs(vmin) you should leave
this at 1.0, otherwise set it to (abs(vmin)+vmax)/(2*abs(vmin))
'''
if isinstance(data, np.ndarray):
start, midpoint, stop = auto_remap(data)
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = np.hstack([
np.linspace(start, 0.5, 128, endpoint=False),
np.linspace(0.5, stop, 129)
])
# shifted index to match the data
shift_index = np.hstack([
np.linspace(0.0, midpoint, 128, endpoint=False),
np.linspace(midpoint, 1.0, 129)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
plt.register_cmap(cmap=newcmap)
return newcmap
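# Minimal usage sketch (not part of the original module API): builds a colormap
# recentred on zero for data with an asymmetric range and shows it with imshow.
# The sample data and colormap choice are illustrative only.
if __name__ == '__main__':
    skewed = np.random.random((20, 20)) * 3.0 - 1.0  # roughly [-1, 2)
    shifted_cmap = remappedColorMap(plt.cm.coolwarm, data=skewed,
                                    name='example_shifted')
    fig, ax = plt.subplots()
    im = ax.imshow(skewed, cmap=shifted_cmap, interpolation='none')
    fig.colorbar(im)  # zero in the data now sits at the colormap's centre colour
    plt.show()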
| gpl-3.0 |
h2educ/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
yogeshVU/matplotlib_apps | timeline_bar.py | 1 | 2447 | #this line prepares IPython for working with matplotlib
%matplotlib inline
# # this actually imports matplotlib
# import numpy as np
# import matplotlib.pyplot as plt
# x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10
# y = np.sin(x)
# z = y + np.random.normal(size=30) * .2
# plt.plot(x, y, 'b+:', label='A sine wave')
# #plt.axhspan(0, 0.25, 0, 1, hold=None, hatch='/')
# plt.axvline(x=3, ymin=0, ymax=1, hold=None,linewidth=4, color='r')
# #plt.axvspan(0, 5, 0, 1, hold=None)
# plt.bar(0, 0.25, width=1, bottom=None, hold=None, data=None)
# #plt.plot(x, z, 'b-', label='Noisy sine')
# plt.legend(loc = 'lower right')
# plt.xlabel("X axis")
# plt.ylabel("Y axis")
# plt.show()
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.set_ylim(0, 50)
ax.set_xlim(0, 200)
#ax.broken_barh([(10, 50), (100, 20), (130, 10)], (20, 9),
# facecolors=('red', 'yellow', 'green'))
ax.set_xlabel('seconds since start')
ax.set_ylabel('Simulation')
ax.set_yticks([5,15, 25])
ax.set_yticklabels(['S1','S2', 'S3'])
# Draw the same pair of bars on each of the three simulation rows
bars_collection_xx = [(100, 20), (150, 10)]
for bars_collection_yy in [(25, 5), (5, 5), (15, 5)]:
    for xx in bars_collection_xx:
        print(xx, bars_collection_yy)
        ax.broken_barh([xx], bars_collection_yy, facecolors='red', hatch='xxx')
#ax.broken_barh([(100, 10), (150, 10)], (25, 5), facecolors='red',hatch='xxx')
#ax.broken_barh([(100, 10), (150, 10)], (5, 5), facecolors='blue',hatch='xxx')
#ax.broken_barh([(100, 10), (150, 10)], (15, 5), facecolors='green',hatch='xxx')
plt.axvline(x=10, ymin=0, ymax=1, hold=None)
ax.grid(True)
ax.annotate('race interrupted', (61, 25),
xytext=(0.8, 0.9), textcoords='axes fraction',
arrowprops=dict(facecolor='black', shrink=0.05),
fontsize=16,
horizontalalignment='right', verticalalignment='top')
xposition = [20, 50, 100]
for xc in xposition:
plt.axvline(x=xc, color='r', linestyle='-')
yposition = [5, 15, 25]
for yc in yposition:
plt.axhline(y=yc, color='b', linestyle='-')
plt.show()
| mit |
bbci/wyrm | examples/performance.py | 1 | 5907 | #!/usr/bin/env python
from __future__ import division
import time
import logging
import cPickle as pickle
import sys
import argparse
sys.path.append('../')
import numpy as np
from matplotlib import pyplot as plt
from wyrm import processing as proc
from wyrm.types import BlockBuffer, RingBuffer, Data
logger = logging.getLogger()
def online_erp(fs, n_channels, subsample):
logger.debug('Running Online ERP with {fs}Hz, and {channels}channels'.format(fs=fs, channels=n_channels))
target_fs = 100
# blocklen in ms
blocklen = 1000 * 1 / target_fs
# blocksize given the original fs and blocklen
blocksize = fs * (blocklen / 1000)
MRK_DEF = {'target': 'm'}
SEG_IVAL = [0, 700]
JUMPING_MEANS_IVALS = [150, 220], [200, 260], [310, 360], [550, 660]
RING_BUFFER_CAP = 1000
cfy = [0, 0]
fs_n = fs / 2
b_l, a_l = proc.signal.butter(5, [30 / fs_n], btype='low')
b_h, a_h = proc.signal.butter(5, [.4 / fs_n], btype='high')
zi_l = proc.lfilter_zi(b_l, a_l, n_channels)
zi_h = proc.lfilter_zi(b_h, a_h, n_channels)
ax_channels = np.array([str(i) for i in range(n_channels)])
names = ['time', 'channel']
units = ['ms', '#']
blockbuf = BlockBuffer(blocksize)
ringbuf = RingBuffer(RING_BUFFER_CAP)
times = []
# time since the last data was acquired
t_last = time.time()
# time since the last marker
t_last_marker = time.time()
# time since the experiment started
t_start = time.time()
full_iterations = 0
while full_iterations < 500:
t0 = time.time()
dt = time.time() - t_last
samples = int(dt * fs)
if samples == 0:
continue
t_last = time.time()
# get data
data = np.random.random((samples, n_channels))
ax_times = np.linspace(0, 1000 * (samples / fs), samples, endpoint=False)
if t_last_marker + .01 < time.time():
t_last_marker = time.time()
markers = [[ax_times[-1], 'm']]
else:
markers = []
cnt = Data(data, axes=[ax_times, ax_channels], names=names, units=units)
cnt.fs = fs
cnt.markers = markers
# blockbuffer
blockbuf.append(cnt)
cnt = blockbuf.get()
if not cnt:
continue
# filter
cnt, zi_l = proc.lfilter(cnt, b_l, a_l, zi=zi_l)
cnt, zi_h = proc.lfilter(cnt, b_h, a_h, zi=zi_h)
# subsample
if subsample:
cnt = proc.subsample(cnt, target_fs)
newsamples = cnt.data.shape[0]
# ringbuffer
ringbuf.append(cnt)
cnt = ringbuf.get()
# epoch
epo = proc.segment_dat(cnt, MRK_DEF, SEG_IVAL, newsamples=newsamples)
if not epo:
continue
# feature vectors
fv = proc.jumping_means(epo, JUMPING_MEANS_IVALS)
rv = proc.create_feature_vectors(fv)
# classification
proc.lda_apply(fv, cfy)
# don't measure in the first second, where the ringbuffer is not
# full yet.
if time.time() - t_start < (RING_BUFFER_CAP / 1000):
continue
dt = time.time() - t0
times.append(dt)
full_iterations += 1
return np.array(times)
def plot():
BLUE = "#268bd2"
RED = "#d33682"
BLACK = "#002b36"
LGRAY = "#eee8d5"
DGRAY = "#93a1a1"
plt.figure(figsize=(8, 4))
with open('results.pickle', 'rb') as fh:
results = pickle.load(fh)
ranges = []
x, y = [], []
for s, t in results:
ranges.append(t.max() - t.min())
y.append(t)
x.append(s[12:])
x = [50, 100, 500] * 6
bp = plt.boxplot(y, labels=x, whis='range', widths=0.7)
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.set_ylim(bottom=1, top=1000)
plt.setp(bp['whiskers'], lw=2, ls='solid', c=BLUE)
plt.setp(bp['medians'], lw=2, c=RED)
plt.setp(bp['boxes'], lw=2, c=BLUE)
plt.setp(bp['caps'], lw=2, c=BLUE)
plt.ylabel('execution time [ms]')
plt.xlabel('number of channels')
plt.yscale('log')
plt.grid(which='major', axis='y', ls='--', color=DGRAY)
plt.grid(which='minor', axis='y', ls='-.', color=LGRAY)
for i in range(5):
plt.vlines((i+1)*3+.5, 0, 300, color=BLACK)
plt.vlines(9.5, 0, 1000, color=BLACK, lw=3)
plt.text(5, 600, 'with subsampling', color=BLACK, weight='bold', horizontalalignment='center')
plt.text(14, 600, 'without subsampling', color=BLACK, weight='bold', horizontalalignment='center')
for i, t in enumerate(['100Hz', '1kHz', '10kHz']):
plt.text(i*3+2, 200, t, color=BLACK, horizontalalignment='center')
plt.text(i*3+11, 200, t, color=BLACK, horizontalalignment='center')
for i, r in enumerate(ranges):
plt.text(i+1, 1.5, "{range:.1f}".format(range=r),
horizontalalignment='center', size='x-small', color=BLUE, weight='semibold')
plt.tight_layout()
plt.show()
def measure():
target_fs = 100, 1000, 10000
target_chans = 50, 100, 500
results = []
for subsample in 1, 0:
for fs in target_fs:
for chan in target_chans:
t = online_erp(fs, chan, subsample=subsample)
t *= 1000
s = "{ss}subsampling\n{fs}Hz\n{chan} channels".format(ss=subsample, fs=fs, chan=chan)
results.append((s, t))
with open('results.pickle', 'wb') as fh:
pickle.dump(results, fh)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Measure online performance.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--measure', action='store_true')
group.add_argument('--plot', action='store_true')
args = parser.parse_args()
if args.measure:
measure()
elif args.plot:
plot()
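# Example invocations (the flags are defined by the argparse setup above):
#   python performance.py --measure   # run the benchmarks and write results.pickle
#   python performance.py --plot      # plot results from an existing results.pickle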
| mit |
iismd17/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
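# The boolean mask of selected features and the per-feature ranking are also
# available after fitting via the usual RFECV attributes, e.g. rfecv.support_
# and rfecv.ranking_.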
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |